diff --git a/web/backend/.gitkeep b/web/backend/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/web/backend/pb_data/auxiliary.db b/web/backend/pb_data/auxiliary.db
new file mode 100644
index 00000000..cad5ea92
Binary files /dev/null and b/web/backend/pb_data/auxiliary.db differ
diff --git a/web/backend/pb_data/auxiliary.db-shm b/web/backend/pb_data/auxiliary.db-shm
new file mode 100644
index 00000000..e62e79dd
Binary files /dev/null and b/web/backend/pb_data/auxiliary.db-shm differ
diff --git a/web/backend/pb_data/auxiliary.db-wal b/web/backend/pb_data/auxiliary.db-wal
new file mode 100644
index 00000000..63535c17
Binary files /dev/null and b/web/backend/pb_data/auxiliary.db-wal differ
diff --git a/web/backend/pb_data/data.db b/web/backend/pb_data/data.db
new file mode 100644
index 00000000..ec5c6655
Binary files /dev/null and b/web/backend/pb_data/data.db differ
diff --git a/web/backend/pb_data/data.db-shm b/web/backend/pb_data/data.db-shm
new file mode 100644
index 00000000..a96e2939
Binary files /dev/null and b/web/backend/pb_data/data.db-shm differ
diff --git a/web/backend/pb_data/data.db-wal b/web/backend/pb_data/data.db-wal
new file mode 100644
index 00000000..59350084
Binary files /dev/null and b/web/backend/pb_data/data.db-wal differ
diff --git a/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp
new file mode 100644
index 00000000..b5684cd2
Binary files /dev/null and b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp differ
diff --git a/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp.attrs b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp.attrs
new file mode 100644
index 00000000..aed45bad
--- /dev/null
+++ b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/image_9l45vdb8qc.webp.attrs
@@ -0,0 +1 @@
+{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/webp","user.metadata":{"original-filename":"image.webp"},"md5":"4vuiFJkERotZ//rKQ1NLug=="}
diff --git a/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp
new file mode 100644
index 00000000..855245e7
Binary files /dev/null and b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp differ
diff --git a/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp.attrs b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp.attrs
new file mode 100644
index 00000000..cf1bb793
--- /dev/null
+++ b/web/backend/pb_data/storage/_pb_users_auth_/65rhkhm8i3f9slq/thumbs_image_9l45vdb8qc.webp/100x100_image_9l45vdb8qc.webp.attrs
@@ -0,0 +1 @@
+{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/webp","user.metadata":null,"md5":"D9i8C8bRgTIApmXQEVBQMQ=="}
diff --git a/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png
new file mode 100644
index 00000000..e6cb96b9
Binary files /dev/null and b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png differ
diff --git a/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs
new file mode 100644
index 00000000..1006759c
--- /dev/null
+++ b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs
@@ -0,0 +1 @@
+{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/png","user.metadata":{"original-filename":"CleanShot 2025-09-18 at 13.32.55@2x.png"},"md5":"/0+nmrAe0HXihDQ/VrDz0g=="}
diff --git a/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png
new file mode 100644
index 00000000..1f5b2c01
Binary files /dev/null and b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png differ
diff --git a/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs
new file mode 100644
index 00000000..93ae6433
--- /dev/null
+++ b/web/backend/pb_data/storage/pbc_632646243/newnewnewnewnew/thumbs_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png/100x100_clean_shot_2025_09_18_at_13_32_qguoh3xg1u.552x.png.attrs
@@ -0,0 +1 @@
+{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/png","user.metadata":null,"md5":"M/Mn43nnA2xzo/0+DPdHvw=="}
diff --git a/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg b/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg
new file mode 100644
index 00000000..d3f858ca
--- /dev/null
+++ b/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg.attrs b/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg.attrs
new file mode 100644
index 00000000..094d51b8
--- /dev/null
+++ b/web/backend/pb_data/storage/pbc_632646243/w6tgtw56wrokcdi/homarr_icon_nifcip2nur.svg.attrs
@@ -0,0 +1 @@
+{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/svg+xml","user.metadata":{"original-filename":"homarr-icon.svg"},"md5":"ROY4BuPSLxEW+8PvpZxvew=="}
diff --git a/web/backend/pb_data/types.d.ts b/web/backend/pb_data/types.d.ts
new file mode 100644
index 00000000..f49ceabb
--- /dev/null
+++ b/web/backend/pb_data/types.d.ts
@@ -0,0 +1,23816 @@
+// 1757184118
+// GENERATED CODE - DO NOT MODIFY BY HAND
+
+// -------------------------------------------------------------------
+// cronBinds
+// -------------------------------------------------------------------
+
+/**
+ * CronAdd registers a new cron job.
+ *
+ * If a cron job with the specified name already exist, it will be
+ * replaced with the new one.
+ *
+ * Example:
+ *
+ * ```js
+ * // prints "Hello world!" on every 30 minutes
+ * cronAdd("hello", "*\/30 * * * *", () => {
+ * console.log("Hello world!")
+ * })
+ * ```
+ *
+ * _Note that this method is available only in pb_hooks context._
+ *
+ * @group PocketBase
+ */
+declare function cronAdd(
+ jobId: string,
+ cronExpr: string,
+ handler: () => void,
+): void;
+
+/**
+ * CronRemove removes a single registered cron job by its name.
+ *
+ * Example:
+ *
+ * ```js
+ * cronRemove("hello")
+ * ```
+ *
+ * _Note that this method is available only in pb_hooks context._
+ *
+ * @group PocketBase
+ */
+declare function cronRemove(jobId: string): void;
+
+// -------------------------------------------------------------------
+// routerBinds
+// -------------------------------------------------------------------
+
+/**
+ * RouterAdd registers a new route definition.
+ *
+ * Example:
+ *
+ * ```js
+ * routerAdd("GET", "/hello", (e) => {
+ * return e.json(200, {"message": "Hello!"})
+ * }, $apis.requireAuth())
+ * ```
+ *
+ * _Note that this method is available only in pb_hooks context._
+ *
+ * @group PocketBase
+ */
+declare function routerAdd(
+ method: string,
+ path: string,
+ handler: (e: core.RequestEvent) => void,
+ ...middlewares: Array void)|Middleware>,
+): void;
+
+/**
+ * RouterUse registers one or more global middlewares that are executed
+ * along the handler middlewares after a matching route is found.
+ *
+ * Example:
+ *
+ * ```js
+ * routerUse((e) => {
+ * console.log(e.request.url.path)
+ * return e.next()
+ * })
+ * ```
+ *
+ * _Note that this method is available only in pb_hooks context._
+ *
+ * @group PocketBase
+ */
+declare function routerUse(...middlewares: Array void)|Middleware>): void;
+
+// -------------------------------------------------------------------
+// baseBinds
+// -------------------------------------------------------------------
+
+/**
+ * Global helper variable that contains the absolute path to the app pb_hooks directory.
+ *
+ * @group PocketBase
+ */
+declare var __hooks: string
+
+// Utility type to exclude the on* hook methods from a type
+// (hooks are separately generated as global methods).
+//
+// See https://www.typescriptlang.org/docs/handbook/2/mapped-types.html#key-remapping-via-as
+type excludeHooks = {
+ [Property in keyof Type as Exclude]: Type[Property]
+};
+
+// CoreApp without the on* hook methods
+type CoreApp = excludeHooks
+
+// PocketBase without the on* hook methods
+type PocketBase = excludeHooks
+
+/**
+ * `$app` is the current running PocketBase instance that is globally
+ * available in each .pb.js file.
+ *
+ * _Note that this variable is available only in pb_hooks context._
+ *
+ * @namespace
+ * @group PocketBase
+ */
+declare var $app: PocketBase
+
+/**
+ * `$template` is a global helper to load and cache HTML templates on the fly.
+ *
+ * The templates uses the standard Go [html/template](https://pkg.go.dev/html/template)
+ * and [text/template](https://pkg.go.dev/text/template) package syntax.
+ *
+ * Example:
+ *
+ * ```js
+ * const html = $template.loadFiles(
+ * "views/layout.html",
+ * "views/content.html",
+ * ).render({"name": "John"})
+ * ```
+ *
+ * @namespace
+ * @group PocketBase
+ */
+declare var $template: template.Registry
+
+/**
+ * This method is superseded by toString.
+ *
+ * @deprecated
+ * @group PocketBase
+ */
+declare function readerToString(reader: any, maxBytes?: number): string;
+
+/**
+ * toString stringifies the specified value.
+ *
+ * Support optional second maxBytes argument to limit the max read bytes
+ * when the value is a io.Reader (default to 32MB).
+ *
+ * Types that don't have explicit string representation are json serialized.
+ *
+ * Example:
+ *
+ * ```js
+ * // io.Reader
+ * const ex1 = toString(e.request.body)
+ *
+ * // slice of bytes
+ * const ex2 = toString([104 101 108 108 111]) // "hello"
+ *
+ * // null
+ * const ex3 = toString(null) // ""
+ * ```
+ *
+ * @group PocketBase
+ */
+declare function toString(val: any, maxBytes?: number): string;
+
+/**
+ * toBytes converts the specified value into a bytes slice.
+ *
+ * Support optional second maxBytes argument to limit the max read bytes
+ * when the value is a io.Reader (default to 32MB).
+ *
+ * Types that don't have Go slice representation (bool, objects, etc.)
+ * are serialized to UTF8 string and its bytes slice is returned.
+ *
+ * Example:
+ *
+ * ```js
+ * // io.Reader
+ * const ex1 = toBytes(e.request.body)
+ *
+ * // string
+ * const ex2 = toBytes("hello") // [104 101 108 108 111]
+ *
+ * // object (the same as the string '{"test":1}')
+ * const ex3 = toBytes({"test":1}) // [123 34 116 101 115 116 34 58 49 125]
+ *
+ * // null
+ * const ex4 = toBytes(null) // []
+ * ```
+ *
+ * @group PocketBase
+ */
+declare function toBytes(val: any, maxBytes?: number): Array;
+
+/**
+ * sleep pauses the current goroutine for at least the specified user duration (in ms).
+ * A zero or negative duration returns immediately.
+ *
+ * Example:
+ *
+ * ```js
+ * sleep(250) // sleeps for 250ms
+ * ```
+ *
+ * @group PocketBase
+ */
+declare function sleep(milliseconds: number): void;
+
+/**
+ * arrayOf creates a placeholder array of the specified models.
+ * Usually used to populate DB result into an array of models.
+ *
+ * Example:
+ *
+ * ```js
+ * const records = arrayOf(new Record)
+ *
+ * $app.recordQuery("articles").limit(10).all(records)
+ * ```
+ *
+ * @group PocketBase
+ */
+declare function arrayOf(model: T): Array;
+
+/**
+ * DynamicModel creates a new dynamic model with fields from the provided data shape.
+ *
+ * Caveats:
+ * - In order to use 0 as double/float initialization number you have to negate it (`-0`).
+ * - You need to use lowerCamelCase when accessing the model fields (e.g. `model.roles` and not `model.Roles`).
+ *
+ * Example:
+ *
+ * ```js
+ * const model = new DynamicModel({
+ * name: ""
+ * age: 0, // int64
+ * totalSpent: -0, // float64
+ * active: false,
+ * Roles: [], // maps to "Roles" in the DB/JSON but the prop would be accessible via "model.roles"
+ * meta: {}
+ * })
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class DynamicModel {
+ constructor(shape?: { [key:string]: any })
+}
+
+interface Context extends context.Context{} // merge
+/**
+ * Context creates a new empty Go context.Context.
+ *
+ * This is usually used as part of some Go transitive bindings.
+ *
+ * Example:
+ *
+ * ```js
+ * const blank = new Context()
+ *
+ * // with single key-value pair
+ * const base = new Context(null, "a", 123)
+ * console.log(base.value("a")) // 123
+ *
+ * // extend with additional key-value pair
+ * const sub = new Context(base, "b", 456)
+ * console.log(sub.value("a")) // 123
+ * console.log(sub.value("b")) // 456
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Context implements context.Context {
+ constructor(parentCtx?: Context, key?: any, value?: any)
+}
+
+/**
+ * Record model class.
+ *
+ * ```js
+ * const collection = $app.findCollectionByNameOrId("article")
+ *
+ * const record = new Record(collection, {
+ * title: "Lorem ipsum"
+ * })
+ *
+ * // or set field values after the initialization
+ * record.set("description", "...")
+ * ```
+ *
+ * @group PocketBase
+ */
+declare const Record: {
+ new(collection?: core.Collection, data?: { [key:string]: any }): core.Record
+
+ // note: declare as "newable" const due to conflict with the Record TS utility type
+}
+
+interface Collection extends core.Collection{
+ type: "base" | "view" | "auth"
+} // merge
+/**
+ * Collection model class.
+ *
+ * ```js
+ * const collection = new Collection({
+ * type: "base",
+ * name: "article",
+ * listRule: "@request.auth.id != '' || status = 'public'",
+ * viewRule: "@request.auth.id != '' || status = 'public'",
+ * deleteRule: "@request.auth.id != ''",
+ * fields: [
+ * {
+ * name: "title",
+ * type: "text",
+ * required: true,
+ * min: 6,
+ * max: 100,
+ * },
+ * {
+ * name: "description",
+ * type: "text",
+ * },
+ * ]
+ * })
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Collection implements core.Collection {
+ constructor(data?: Partial)
+}
+
+interface FieldsList extends core.FieldsList{} // merge
+/**
+ * FieldsList model class, usually used to define the Collection.fields.
+ *
+ * @group PocketBase
+ */
+declare class FieldsList implements core.FieldsList {
+ constructor(data?: Partial)
+}
+
+interface Field extends core.Field{} // merge
+/**
+ * Field model class, usually used as part of the FieldsList model.
+ *
+ * @group PocketBase
+ */
+declare class Field implements core.Field {
+ constructor(data?: Partial)
+}
+
+interface NumberField extends core.NumberField{} // merge
+/**
+ * {@inheritDoc core.NumberField}
+ *
+ * @group PocketBase
+ */
+declare class NumberField implements core.NumberField {
+ constructor(data?: Partial)
+}
+
+interface BoolField extends core.BoolField{} // merge
+/**
+ * {@inheritDoc core.BoolField}
+ *
+ * @group PocketBase
+ */
+declare class BoolField implements core.BoolField {
+ constructor(data?: Partial)
+}
+
+interface TextField extends core.TextField{} // merge
+/**
+ * {@inheritDoc core.TextField}
+ *
+ * @group PocketBase
+ */
+declare class TextField implements core.TextField {
+ constructor(data?: Partial)
+}
+
+interface URLField extends core.URLField{} // merge
+/**
+ * {@inheritDoc core.URLField}
+ *
+ * @group PocketBase
+ */
+declare class URLField implements core.URLField {
+ constructor(data?: Partial)
+}
+
+interface EmailField extends core.EmailField{} // merge
+/**
+ * {@inheritDoc core.EmailField}
+ *
+ * @group PocketBase
+ */
+declare class EmailField implements core.EmailField {
+ constructor(data?: Partial)
+}
+
+interface EditorField extends core.EditorField{} // merge
+/**
+ * {@inheritDoc core.EditorField}
+ *
+ * @group PocketBase
+ */
+declare class EditorField implements core.EditorField {
+ constructor(data?: Partial)
+}
+
+interface PasswordField extends core.PasswordField{} // merge
+/**
+ * {@inheritDoc core.PasswordField}
+ *
+ * @group PocketBase
+ */
+declare class PasswordField implements core.PasswordField {
+ constructor(data?: Partial)
+}
+
+interface DateField extends core.DateField{} // merge
+/**
+ * {@inheritDoc core.DateField}
+ *
+ * @group PocketBase
+ */
+declare class DateField implements core.DateField {
+ constructor(data?: Partial)
+}
+
+interface AutodateField extends core.AutodateField{} // merge
+/**
+ * {@inheritDoc core.AutodateField}
+ *
+ * @group PocketBase
+ */
+declare class AutodateField implements core.AutodateField {
+ constructor(data?: Partial)
+}
+
+interface JSONField extends core.JSONField{} // merge
+/**
+ * {@inheritDoc core.JSONField}
+ *
+ * @group PocketBase
+ */
+declare class JSONField implements core.JSONField {
+ constructor(data?: Partial)
+}
+
+interface RelationField extends core.RelationField{} // merge
+/**
+ * {@inheritDoc core.RelationField}
+ *
+ * @group PocketBase
+ */
+declare class RelationField implements core.RelationField {
+ constructor(data?: Partial)
+}
+
+interface SelectField extends core.SelectField{} // merge
+/**
+ * {@inheritDoc core.SelectField}
+ *
+ * @group PocketBase
+ */
+declare class SelectField implements core.SelectField {
+ constructor(data?: Partial)
+}
+
+interface FileField extends core.FileField{} // merge
+/**
+ * {@inheritDoc core.FileField}
+ *
+ * @group PocketBase
+ */
+declare class FileField implements core.FileField {
+ constructor(data?: Partial)
+}
+
+interface GeoPointField extends core.GeoPointField{} // merge
+/**
+ * {@inheritDoc core.GeoPointField}
+ *
+ * @group PocketBase
+ */
+declare class GeoPointField implements core.GeoPointField {
+ constructor(data?: Partial)
+}
+
+interface MailerMessage extends mailer.Message{} // merge
+/**
+ * MailerMessage defines a single email message.
+ *
+ * ```js
+ * const message = new MailerMessage({
+ * from: {
+ * address: $app.settings().meta.senderAddress,
+ * name: $app.settings().meta.senderName,
+ * },
+ * to: [{address: "test@example.com"}],
+ * subject: "YOUR_SUBJECT...",
+ * html: "YOUR_HTML_BODY...",
+ * })
+ *
+ * $app.newMailClient().send(message)
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class MailerMessage implements mailer.Message {
+ constructor(message?: Partial)
+}
+
+interface Command extends cobra.Command{} // merge
+/**
+ * Command defines a single console command.
+ *
+ * Example:
+ *
+ * ```js
+ * const command = new Command({
+ * use: "hello",
+ * run: (cmd, args) => { console.log("Hello world!") },
+ * })
+ *
+ * $app.rootCmd.addCommand(command);
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Command implements cobra.Command {
+ constructor(cmd?: Partial)
+}
+
+/**
+ * RequestInfo defines a single core.RequestInfo instance, usually used
+ * as part of various filter checks.
+ *
+ * Example:
+ *
+ * ```js
+ * const authRecord = $app.findAuthRecordByEmail("users", "test@example.com")
+ *
+ * const info = new RequestInfo({
+ * auth: authRecord,
+ * body: {"name": 123},
+ * headers: {"x-token": "..."},
+ * })
+ *
+ * const record = $app.findFirstRecordByData("articles", "slug", "hello")
+ *
+ * const canAccess = $app.canAccessRecord(record, info, "@request.auth.id != '' && @request.body.name = 123")
+ * ```
+ *
+ * @group PocketBase
+ */
+declare const RequestInfo: {
+ new(info?: Partial): core.RequestInfo
+
+ // note: declare as "newable" const due to conflict with the RequestInfo TS node type
+}
+
+/**
+ * Middleware defines a single request middleware handler.
+ *
+ * This class is usually used when you want to explicitly specify a priority to your custom route middleware.
+ *
+ * Example:
+ *
+ * ```js
+ * routerUse(new Middleware((e) => {
+ * console.log(e.request.url.path)
+ * return e.next()
+ * }, -10))
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Middleware {
+ constructor(
+ func: string|((e: core.RequestEvent) => void),
+ priority?: number,
+ id?: string,
+ )
+}
+
+interface Timezone extends time.Location{} // merge
+/**
+ * Timezone returns the timezone location with the given name.
+ *
+ * The name is expected to be a location name corresponding to a file
+ * in the IANA Time Zone database, such as "America/New_York".
+ *
+ * If the name is "Local", LoadLocation returns Local.
+ *
+ * If the name is "", invalid or "UTC", returns UTC.
+ *
+ * The constructor is equivalent to calling the Go `time.LoadLocation(name)` method.
+ *
+ * Example:
+ *
+ * ```js
+ * const zone = new Timezone("America/New_York")
+ * $app.cron().setTimezone(zone)
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Timezone implements time.Location {
+ constructor(name?: string)
+}
+
+interface DateTime extends types.DateTime{} // merge
+/**
+ * DateTime defines a single DateTime type instance.
+ * The returned date is always represented in UTC.
+ *
+ * Example:
+ *
+ * ```js
+ * const dt0 = new DateTime() // now
+ *
+ * // full datetime string
+ * const dt1 = new DateTime('2023-07-01 00:00:00.000Z')
+ *
+ * // datetime string with default "parse in" timezone location
+ * //
+ * // similar to new DateTime('2023-07-01 00:00:00 +01:00') or new DateTime('2023-07-01 00:00:00 +02:00')
+ * // but accounts for the daylight saving time (DST)
+ * const dt2 = new DateTime('2023-07-01 00:00:00', 'Europe/Amsterdam')
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class DateTime implements types.DateTime {
+ constructor(date?: string, defaultParseInLocation?: string)
+}
+
+interface ValidationError extends ozzo_validation.Error{} // merge
+/**
+ * ValidationError defines a single formatted data validation error,
+ * usually used as part of an error response.
+ *
+ * ```js
+ * new ValidationError("invalid_title", "Title is not valid")
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class ValidationError implements ozzo_validation.Error {
+ constructor(code?: string, message?: string)
+}
+
+interface Cookie extends http.Cookie{} // merge
+/**
+ * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
+ * HTTP response.
+ *
+ * Example:
+ *
+ * ```js
+ * routerAdd("POST", "/example", (c) => {
+ * c.setCookie(new Cookie({
+ * name: "example_name",
+ * value: "example_value",
+ * path: "/",
+ * domain: "example.com",
+ * maxAge: 10,
+ * secure: true,
+ * httpOnly: true,
+ * sameSite: 3,
+ * }))
+ *
+ * return c.redirect(200, "/");
+ * })
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class Cookie implements http.Cookie {
+ constructor(options?: Partial)
+}
+
+interface SubscriptionMessage extends subscriptions.Message{} // merge
+/**
+ * SubscriptionMessage defines a realtime subscription payload.
+ *
+ * Example:
+ *
+ * ```js
+ * onRealtimeConnectRequest((e) => {
+ * e.client.send(new SubscriptionMessage({
+ * name: "example",
+ * data: '{"greeting": "Hello world"}'
+ * }))
+ * })
+ * ```
+ *
+ * @group PocketBase
+ */
+declare class SubscriptionMessage implements subscriptions.Message {
+ constructor(options?: Partial)
+}
+
+// -------------------------------------------------------------------
+// dbxBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$dbx` defines common utility for working with the DB abstraction.
+ * For examples and guides please check the [Database guide](https://pocketbase.io/docs/js-database).
+ *
+ * @group PocketBase
+ */
+declare namespace $dbx {
+ /**
+ * {@inheritDoc dbx.HashExp}
+ */
+ export function hashExp(pairs: { [key:string]: any }): dbx.Expression
+
+ let _in: dbx._in
+ export { _in as in }
+
+ export let exp: dbx.newExp
+ export let not: dbx.not
+ export let and: dbx.and
+ export let or: dbx.or
+ export let notIn: dbx.notIn
+ export let like: dbx.like
+ export let orLike: dbx.orLike
+ export let notLike: dbx.notLike
+ export let orNotLike: dbx.orNotLike
+ export let exists: dbx.exists
+ export let notExists: dbx.notExists
+ export let between: dbx.between
+ export let notBetween: dbx.notBetween
+}
+
+// -------------------------------------------------------------------
+// mailsBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$mails` defines helpers to send common
+ * auth records emails like verification, password reset, etc.
+ *
+ * @group PocketBase
+ */
+declare namespace $mails {
+ let sendRecordPasswordReset: mails.sendRecordPasswordReset
+ let sendRecordVerification: mails.sendRecordVerification
+ let sendRecordChangeEmail: mails.sendRecordChangeEmail
+ let sendRecordOTP: mails.sendRecordOTP
+}
+
+// -------------------------------------------------------------------
+// securityBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$security` defines low level helpers for creating
+ * and parsing JWTs, random string generation, AES encryption, etc.
+ *
+ * @group PocketBase
+ */
+declare namespace $security {
+ let randomString: security.randomString
+ let randomStringWithAlphabet: security.randomStringWithAlphabet
+ let randomStringByRegex: security.randomStringByRegex
+ let pseudorandomString: security.pseudorandomString
+ let pseudorandomStringWithAlphabet: security.pseudorandomStringWithAlphabet
+ let encrypt: security.encrypt
+ let decrypt: security.decrypt
+ let hs256: security.hs256
+ let hs512: security.hs512
+ let equal: security.equal
+ let md5: security.md5
+ let sha256: security.sha256
+ let sha512: security.sha512
+
+ /**
+ * {@inheritDoc security.newJWT}
+ */
+ export function createJWT(payload: { [key:string]: any }, signingKey: string, secDuration: number): string
+
+ /**
+ * {@inheritDoc security.parseUnverifiedJWT}
+ */
+ export function parseUnverifiedJWT(token: string): _TygojaDict
+
+ /**
+ * {@inheritDoc security.parseJWT}
+ */
+ export function parseJWT(token: string, verificationKey: string): _TygojaDict
+}
+
+// -------------------------------------------------------------------
+// filesystemBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$filesystem` defines common helpers for working
+ * with the PocketBase filesystem abstraction.
+ *
+ * @group PocketBase
+ */
+declare namespace $filesystem {
+ let fileFromPath: filesystem.newFileFromPath
+ let fileFromBytes: filesystem.newFileFromBytes
+ let fileFromMultipart: filesystem.newFileFromMultipart
+
+ /**
+ * fileFromURL creates a new File from the provided url by
+ * downloading the resource and creating a BytesReader.
+ *
+ * Example:
+ *
+ * ```js
+ * // with default max timeout of 120sec
+ * const file1 = $filesystem.fileFromURL("https://...")
+ *
+ * // with custom timeout of 15sec
+ * const file2 = $filesystem.fileFromURL("https://...", 15)
+ * ```
+ */
+ export function fileFromURL(url: string, secTimeout?: number): filesystem.File
+}
+
+// -------------------------------------------------------------------
+// filepathBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$filepath` defines common helpers for manipulating filename
+ * paths in a way compatible with the target operating system-defined file paths.
+ *
+ * @group PocketBase
+ */
+declare namespace $filepath {
+ export let base: filepath.base
+ export let clean: filepath.clean
+ export let dir: filepath.dir
+ export let ext: filepath.ext
+ export let fromSlash: filepath.fromSlash
+ export let glob: filepath.glob
+ export let isAbs: filepath.isAbs
+ export let join: filepath.join
+ export let match: filepath.match
+ export let rel: filepath.rel
+ export let split: filepath.split
+ export let splitList: filepath.splitList
+ export let toSlash: filepath.toSlash
+ export let walk: filepath.walk
+ export let walkDir: filepath.walkDir
+}
+
+// -------------------------------------------------------------------
+// osBinds
+// -------------------------------------------------------------------
+
+/**
+ * `$os` defines common helpers for working with the OS level primitives
+ * (eg. deleting directories, executing shell commands, etc.).
+ *
+ * @group PocketBase
+ */
+declare namespace $os {
+ /**
+ * Legacy alias for $os.cmd().
+ */
+ export let exec: exec.command
+
+ /**
+ * Prepares an external OS command.
+ *
+ * Example:
+ *
+ * ```js
+ * // prepare the command to execute
+ * const cmd = $os.cmd('ls', '-sl')
+ *
+ * // execute the command and return its standard output as string
+ * const output = toString(cmd.output());
+ * ```
+ */
+ export let cmd: exec.command
+
+ /**
+ * Args hold the command-line arguments, starting with the program name.
+ */
+ export let args: Array
+
+ export let exit: os.exit
+ export let getenv: os.getenv
+ export let dirFS: os.dirFS
+ export let readFile: os.readFile
+ export let writeFile: os.writeFile
+ export let stat: os.stat
+ export let readDir: os.readDir
+ export let tempDir: os.tempDir
+ export let truncate: os.truncate
+ export let getwd: os.getwd
+ export let mkdir: os.mkdir
+ export let mkdirAll: os.mkdirAll
+ export let rename: os.rename
+ export let remove: os.remove
+ export let removeAll: os.removeAll
+ export let openRoot: os.openRoot
+ export let openInRoot: os.openInRoot
+}
+
+// -------------------------------------------------------------------
+// formsBinds
+// -------------------------------------------------------------------
+
+interface AppleClientSecretCreateForm extends forms.AppleClientSecretCreate{} // merge
+/**
+ * @inheritDoc
+ * @group PocketBase
+ */
+declare class AppleClientSecretCreateForm implements forms.AppleClientSecretCreate {
+ constructor(app: CoreApp)
+}
+
+interface RecordUpsertForm extends forms.RecordUpsert{} // merge
+/**
+ * @inheritDoc
+ * @group PocketBase
+ */
+declare class RecordUpsertForm implements forms.RecordUpsert {
+ constructor(app: CoreApp, record: core.Record)
+}
+
+interface TestEmailSendForm extends forms.TestEmailSend{} // merge
+/**
+ * @inheritDoc
+ * @group PocketBase
+ */
+declare class TestEmailSendForm implements forms.TestEmailSend {
+ constructor(app: CoreApp)
+}
+
+interface TestS3FilesystemForm extends forms.TestS3Filesystem{} // merge
+/**
+ * @inheritDoc
+ * @group PocketBase
+ */
+declare class TestS3FilesystemForm implements forms.TestS3Filesystem {
+ constructor(app: CoreApp)
+}
+
+// -------------------------------------------------------------------
+// apisBinds
+// -------------------------------------------------------------------
+
+interface ApiError extends router.ApiError{} // merge
+/**
+ * @inheritDoc
+ *
+ * @group PocketBase
+ */
+declare class ApiError implements router.ApiError {
+ constructor(status?: number, message?: string, data?: any)
+}
+
+interface NotFoundError extends router.ApiError{} // merge
+/**
+ * NotFoundError returns 404 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class NotFoundError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+interface BadRequestError extends router.ApiError{} // merge
+/**
+ * BadRequestError returns 400 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class BadRequestError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+interface ForbiddenError extends router.ApiError{} // merge
+/**
+ * ForbiddenError returns 403 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class ForbiddenError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+interface UnauthorizedError extends router.ApiError{} // merge
+/**
+ * UnauthorizedError returns 401 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class UnauthorizedError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+interface TooManyRequestsError extends router.ApiError{} // merge
+/**
+ * TooManyRequestsError returns 429 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class TooManyRequestsError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+interface InternalServerError extends router.ApiError{} // merge
+/**
+ * InternalServerError returns 500 ApiError.
+ *
+ * @group PocketBase
+ */
+declare class InternalServerError implements router.ApiError {
+ constructor(message?: string, data?: any)
+}
+
+/**
+ * `$apis` defines commonly used PocketBase api helpers and middlewares.
+ *
+ * @group PocketBase
+ */
+declare namespace $apis {
+ /**
+  * Route handler to serve static directory content (html, js, css, etc.).
+  *
+  * If a file resource is missing and indexFallback is set, the request
+  * will be forwarded to the base index.html (useful for SPA).
+  */
+ export function static(dir: string, indexFallback: boolean): (e: core.RequestEvent) => void
+
+ // Commonly used middlewares and record helpers re-exported from the
+ // apis package (see the corresponding apis.* declarations for details).
+ let requireGuestOnly: apis.requireGuestOnly
+ let requireAuth: apis.requireAuth
+ let requireSuperuserAuth: apis.requireSuperuserAuth
+ let requireSuperuserOrOwnerAuth: apis.requireSuperuserOrOwnerAuth
+ let skipSuccessActivityLog: apis.skipSuccessActivityLog
+ let gzip: apis.gzip
+ let bodyLimit: apis.bodyLimit
+ let enrichRecord: apis.enrichRecord
+ let enrichRecords: apis.enrichRecords
+
+ /**
+  * RecordAuthResponse writes standardized json record auth response
+  * into the specified request event.
+  *
+  * The authMethod argument specify the name of the current authentication method (eg. password, oauth2, etc.)
+  * that it is used primarily as an auth identifier during MFA and for login alerts.
+  *
+  * Set authMethod to empty string if you want to ignore the MFA checks and the login alerts
+  * (can be also adjusted additionally via the onRecordAuthRequest hook).
+  */
+ export function recordAuthResponse(e: core.RequestEvent, authRecord: core.Record, authMethod: string, meta?: any): void
+}
+
+// -------------------------------------------------------------------
+// httpClientBinds
+// -------------------------------------------------------------------
+
+// extra FormData overload to prevent TS warnings when used with non File/Blob value.
+// Merges with the built-in FormData declaration; it does not replace it.
+interface FormData {
+ append(key:string, value:any): void
+ set(key:string, value:any): void
+}
+
+/**
+ * `$http` defines common methods for working with HTTP requests.
+ *
+ * @group PocketBase
+ */
+declare namespace $http {
+ /**
+  * Sends a single HTTP request.
+  *
+  * Example:
+  *
+  * ```js
+  * const res = $http.send({
+  *     method: "POST",
+  *     url:    "https://example.com",
+  *     body:   JSON.stringify({"title": "test"}),
+  *     headers: { 'Content-Type': 'application/json' }
+  * })
+  *
+  * console.log(res.statusCode) // the response HTTP status code
+  * console.log(res.headers)    // the response headers (eg. res.headers['X-Custom'][0])
+  * console.log(res.cookies)    // the response cookies (eg. res.cookies.sessionId.value)
+  * console.log(res.body)       // the response body as raw bytes slice
+  * console.log(res.json)       // the response body as parsed json array or map
+  * ```
+  */
+ function send(config: {
+ url: string,
+ body?: string|FormData,
+ method?: string, // default to "GET"
+ headers?: { [key:string]: string },
+ timeout?: number, // default to 120
+
+ // @deprecated please use body instead
+ data?: { [key:string]: any },
+ }): {
+ statusCode: number,
+ // header values are arrays since a header may repeat
+ headers: { [key:string]: Array },
+ cookies: { [key:string]: http.Cookie },
+ json: any,
+ body: Array,
+
+ // @deprecated please use toString(result.body) instead
+ raw: string,
+ };
+}
+
+// -------------------------------------------------------------------
+// migrate only
+// -------------------------------------------------------------------
+
+/**
+ * Migrate defines a single migration upgrade/downgrade action.
+ *
+ * Both callbacks receive the transactional app instance (txApp) to
+ * perform their changes with; down is optional.
+ *
+ * _Note that this method is available only in pb_migrations context._
+ *
+ * @group PocketBase
+ */
+declare function migrate(
+ up: (txApp: CoreApp) => void,
+ down?: (txApp: CoreApp) => void
+): void;
+// -------------------------------------------------------------------
+// App event hook registrations. Each function registers a handler for
+// the corresponding core.*Event. The optional trailing ...tags
+// presumably scope the hook to specific collections — confirm against
+// the PocketBase event hooks documentation.
+// -------------------------------------------------------------------
+/** @group PocketBase */declare function onBackupCreate(handler: (e: core.BackupEvent) => void): void
+/** @group PocketBase */declare function onBackupRestore(handler: (e: core.BackupEvent) => void): void
+/** @group PocketBase */declare function onBatchRequest(handler: (e: core.BatchRequestEvent) => void): void
+/** @group PocketBase */declare function onBootstrap(handler: (e: core.BootstrapEvent) => void): void
+/** @group PocketBase */declare function onCollectionAfterCreateError(handler: (e: core.CollectionErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionAfterCreateSuccess(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionAfterDeleteError(handler: (e: core.CollectionErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionAfterDeleteSuccess(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionAfterUpdateError(handler: (e: core.CollectionErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionAfterUpdateSuccess(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionCreate(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionCreateExecute(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionCreateRequest(handler: (e: core.CollectionRequestEvent) => void): void
+/** @group PocketBase */declare function onCollectionDelete(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionDeleteExecute(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionDeleteRequest(handler: (e: core.CollectionRequestEvent) => void): void
+/** @group PocketBase */declare function onCollectionUpdate(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionUpdateExecute(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionUpdateRequest(handler: (e: core.CollectionRequestEvent) => void): void
+/** @group PocketBase */declare function onCollectionValidate(handler: (e: core.CollectionEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onCollectionViewRequest(handler: (e: core.CollectionRequestEvent) => void): void
+/** @group PocketBase */declare function onCollectionsImportRequest(handler: (e: core.CollectionsImportRequestEvent) => void): void
+/** @group PocketBase */declare function onCollectionsListRequest(handler: (e: core.CollectionsListRequestEvent) => void): void
+/** @group PocketBase */declare function onFileDownloadRequest(handler: (e: core.FileDownloadRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onFileTokenRequest(handler: (e: core.FileTokenRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerRecordAuthAlertSend(handler: (e: core.MailerRecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerRecordEmailChangeSend(handler: (e: core.MailerRecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerRecordOTPSend(handler: (e: core.MailerRecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerRecordPasswordResetSend(handler: (e: core.MailerRecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerRecordVerificationSend(handler: (e: core.MailerRecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onMailerSend(handler: (e: core.MailerEvent) => void): void
+/** @group PocketBase */declare function onModelAfterCreateError(handler: (e: core.ModelErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelAfterCreateSuccess(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelAfterDeleteError(handler: (e: core.ModelErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelAfterDeleteSuccess(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelAfterUpdateError(handler: (e: core.ModelErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelAfterUpdateSuccess(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelCreate(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelCreateExecute(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelDelete(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelDeleteExecute(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelUpdate(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelUpdateExecute(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onModelValidate(handler: (e: core.ModelEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRealtimeConnectRequest(handler: (e: core.RealtimeConnectRequestEvent) => void): void
+/** @group PocketBase */declare function onRealtimeMessageSend(handler: (e: core.RealtimeMessageEvent) => void): void
+/** @group PocketBase */declare function onRealtimeSubscribeRequest(handler: (e: core.RealtimeSubscribeRequestEvent) => void): void
+/** @group PocketBase */declare function onRecordAfterCreateError(handler: (e: core.RecordErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAfterCreateSuccess(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAfterDeleteError(handler: (e: core.RecordErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAfterDeleteSuccess(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAfterUpdateError(handler: (e: core.RecordErrorEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAfterUpdateSuccess(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAuthRefreshRequest(handler: (e: core.RecordAuthRefreshRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAuthRequest(handler: (e: core.RecordAuthRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAuthWithOAuth2Request(handler: (e: core.RecordAuthWithOAuth2RequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAuthWithOTPRequest(handler: (e: core.RecordAuthWithOTPRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordAuthWithPasswordRequest(handler: (e: core.RecordAuthWithPasswordRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordConfirmEmailChangeRequest(handler: (e: core.RecordConfirmEmailChangeRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordConfirmPasswordResetRequest(handler: (e: core.RecordConfirmPasswordResetRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordConfirmVerificationRequest(handler: (e: core.RecordConfirmVerificationRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordCreate(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordCreateExecute(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordCreateRequest(handler: (e: core.RecordRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordDelete(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordDeleteExecute(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordDeleteRequest(handler: (e: core.RecordRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordEnrich(handler: (e: core.RecordEnrichEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordRequestEmailChangeRequest(handler: (e: core.RecordRequestEmailChangeRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordRequestOTPRequest(handler: (e: core.RecordCreateOTPRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordRequestPasswordResetRequest(handler: (e: core.RecordRequestPasswordResetRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordRequestVerificationRequest(handler: (e: core.RecordRequestVerificationRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordUpdate(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordUpdateExecute(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordUpdateRequest(handler: (e: core.RecordRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordValidate(handler: (e: core.RecordEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordViewRequest(handler: (e: core.RecordRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onRecordsListRequest(handler: (e: core.RecordsListRequestEvent) => void, ...tags: string[]): void
+/** @group PocketBase */declare function onSettingsListRequest(handler: (e: core.SettingsListRequestEvent) => void): void
+/** @group PocketBase */declare function onSettingsReload(handler: (e: core.SettingsReloadEvent) => void): void
+/** @group PocketBase */declare function onSettingsUpdateRequest(handler: (e: core.SettingsUpdateRequestEvent) => void): void
+/** @group PocketBase */declare function onTerminate(handler: (e: core.TerminateEvent) => void): void
+// Helper aliases emitted by the tygoja Go-to-TS generator for Go maps
+// and interface{} values.
+type _TygojaDict = { [key:string | number | symbol]: any; }
+type _TygojaAny = any
+
+/**
+ * Package os provides a platform-independent interface to operating system
+ * functionality. The design is Unix-like, although the error handling is
+ * Go-like; failing calls return values of type error rather than error numbers.
+ * Often, more information is available within the error. For example,
+ * if a call that takes a file name fails, such as [Open] or [Stat], the error
+ * will include the failing file name when printed and will be of type
+ * [*PathError], which may be unpacked for more information.
+ *
+ * The os interface is intended to be uniform across all operating systems.
+ * Features not generally available appear in the system-specific package syscall.
+ *
+ * Here is a simple example, opening a file and reading some of it.
+ *
+ * ```
+ * file, err := os.Open("file.go") // For read access.
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * If the open fails, the error string will be self-explanatory, like
+ *
+ * ```
+ * open file.go: no such file or directory
+ * ```
+ *
+ * The file's data can then be read into a slice of bytes. Read and
+ * Write take their byte counts from the length of the argument slice.
+ *
+ * ```
+ * data := make([]byte, 100)
+ * count, err := file.Read(data)
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * fmt.Printf("read %d bytes: %q\n", count, data[:count])
+ * ```
+ *
+ * # Concurrency
+ *
+ * The methods of [File] correspond to file system operations. All are
+ * safe for concurrent use. The maximum number of concurrent
+ * operations on a File may be limited by the OS or the system. The
+ * number should be high, but exceeding it may degrade performance or
+ * cause other issues.
+ */
+namespace os {
+ interface readdirMode extends Number{}
+ interface File {
+ /**
+ * Readdir reads the contents of the directory associated with file and
+ * returns a slice of up to n [FileInfo] values, as would be returned
+ * by [Lstat], in directory order. Subsequent calls on the same file will yield
+ * further FileInfos.
+ *
+ * If n > 0, Readdir returns at most n FileInfo structures. In this case, if
+ * Readdir returns an empty slice, it will return a non-nil error
+ * explaining why. At the end of a directory, the error is [io.EOF].
+ *
+ * If n <= 0, Readdir returns all the FileInfo from the directory in
+ * a single slice. In this case, if Readdir succeeds (reads all
+ * the way to the end of the directory), it returns the slice and a
+ * nil error. If it encounters an error before the end of the
+ * directory, Readdir returns the FileInfo read until that point
+ * and a non-nil error.
+ *
+ * Most clients are better served by the more efficient ReadDir method.
+ */
+ readdir(n: number): Array
+ }
+ interface File {
+ /**
+ * Readdirnames reads the contents of the directory associated with file
+ * and returns a slice of up to n names of files in the directory,
+ * in directory order. Subsequent calls on the same file will yield
+ * further names.
+ *
+ * If n > 0, Readdirnames returns at most n names. In this case, if
+ * Readdirnames returns an empty slice, it will return a non-nil error
+ * explaining why. At the end of a directory, the error is [io.EOF].
+ *
+ * If n <= 0, Readdirnames returns all the names from the directory in
+ * a single slice. In this case, if Readdirnames succeeds (reads all
+ * the way to the end of the directory), it returns the slice and a
+ * nil error. If it encounters an error before the end of the
+ * directory, Readdirnames returns the names read until that point and
+ * a non-nil error.
+ */
+ readdirnames(n: number): Array
+ }
+ /**
+ * A DirEntry is an entry read from a directory
+ * (using the [ReadDir] function or a [File.ReadDir] method).
+ */
+ interface DirEntry extends fs.DirEntry{}
+ interface File {
+ /**
+ * ReadDir reads the contents of the directory associated with the file f
+ * and returns a slice of [DirEntry] values in directory order.
+ * Subsequent calls on the same file will yield later DirEntry records in the directory.
+ *
+ * If n > 0, ReadDir returns at most n DirEntry records.
+ * In this case, if ReadDir returns an empty slice, it will return an error explaining why.
+ * At the end of a directory, the error is [io.EOF].
+ *
+ * If n <= 0, ReadDir returns all the DirEntry records remaining in the directory.
+ * When it succeeds, it returns a nil error (not io.EOF).
+ */
+ readDir(n: number): Array
+ }
+ interface readDir {
+ /**
+ * ReadDir reads the named directory,
+ * returning all its directory entries sorted by filename.
+ * If an error occurs reading the directory,
+ * ReadDir returns the entries it was able to read before the error,
+ * along with the error.
+ */
+ (name: string): Array
+ }
+ interface copyFS {
+ /**
+ * CopyFS copies the file system fsys into the directory dir,
+ * creating dir if necessary.
+ *
+ * Files are created with mode 0o666 plus any execute permissions
+ * from the source, and directories are created with mode 0o777
+ * (before umask).
+ *
+ * CopyFS will not overwrite existing files. If a file name in fsys
+ * already exists in the destination, CopyFS will return an error
+ * such that errors.Is(err, fs.ErrExist) will be true.
+ *
+ * Symbolic links in fsys are not supported. A *PathError with Err set
+ * to ErrInvalid is returned when copying from a symbolic link.
+ *
+ * Symbolic links in dir are followed.
+ *
+ * New files added to fsys (including if dir is a subdirectory of fsys)
+ * while CopyFS is running are not guaranteed to be copied.
+ *
+ * Copying stops at and returns the first error encountered.
+ */
+ (dir: string, fsys: fs.FS): void
+ }
+ /**
+ * Auxiliary information if the File describes a directory
+ */
+ interface dirInfo {
+ }
+ interface expand {
+ /**
+ * Expand replaces ${var} or $var in the string based on the mapping function.
+ * For example, [os.ExpandEnv](s) is equivalent to [os.Expand](s, [os.Getenv]).
+ */
+ (s: string, mapping: (_arg0: string) => string): string
+ }
+ interface expandEnv {
+ /**
+ * ExpandEnv replaces ${var} or $var in the string according to the values
+ * of the current environment variables. References to undefined
+ * variables are replaced by the empty string.
+ */
+ (s: string): string
+ }
+ interface getenv {
+ /**
+ * Getenv retrieves the value of the environment variable named by the key.
+ * It returns the value, which will be empty if the variable is not present.
+ * To distinguish between an empty value and an unset value, use [LookupEnv].
+ */
+ (key: string): string
+ }
+ interface lookupEnv {
+ /**
+ * LookupEnv retrieves the value of the environment variable named
+ * by the key. If the variable is present in the environment the
+ * value (which may be empty) is returned and the boolean is true.
+ * Otherwise the returned value will be empty and the boolean will
+ * be false.
+ */
+ (key: string): [string, boolean]
+ }
+ interface setenv {
+ /**
+ * Setenv sets the value of the environment variable named by the key.
+ * It returns an error, if any.
+ */
+ (key: string, value: string): void
+ }
+ interface unsetenv {
+ /**
+ * Unsetenv unsets a single environment variable.
+ */
+ (key: string): void
+ }
+ interface clearenv {
+ /**
+ * Clearenv deletes all environment variables.
+ */
+ (): void
+ }
+ interface environ {
+ /**
+ * Environ returns a copy of strings representing the environment,
+ * in the form "key=value".
+ */
+ (): Array
+ }
+ interface timeout {
+ [key:string]: any;
+ timeout(): boolean
+ }
+ /**
+ * PathError records an error and the operation and file path that caused it.
+ */
+ interface PathError extends fs.PathError{}
+ /**
+ * SyscallError records an error from a specific system call.
+ */
+ interface SyscallError {
+ syscall: string
+ err: Error
+ }
+ interface SyscallError {
+ error(): string
+ }
+ interface SyscallError {
+ unwrap(): void
+ }
+ interface SyscallError {
+ /**
+ * Timeout reports whether this error represents a timeout.
+ */
+ timeout(): boolean
+ }
+ interface newSyscallError {
+ /**
+ * NewSyscallError returns, as an error, a new [SyscallError]
+ * with the given system call name and error details.
+ * As a convenience, if err is nil, NewSyscallError returns nil.
+ */
+ (syscall: string, err: Error): void
+ }
+ interface isExist {
+ /**
+ * IsExist returns a boolean indicating whether its argument is known to report
+ * that a file or directory already exists. It is satisfied by [ErrExist] as
+ * well as some syscall errors.
+ *
+ * This function predates [errors.Is]. It only supports errors returned by
+ * the os package. New code should use errors.Is(err, fs.ErrExist).
+ */
+ (err: Error): boolean
+ }
+ interface isNotExist {
+ /**
+ * IsNotExist returns a boolean indicating whether its argument is known to
+ * report that a file or directory does not exist. It is satisfied by
+ * [ErrNotExist] as well as some syscall errors.
+ *
+ * This function predates [errors.Is]. It only supports errors returned by
+ * the os package. New code should use errors.Is(err, fs.ErrNotExist).
+ */
+ (err: Error): boolean
+ }
+ interface isPermission {
+ /**
+ * IsPermission returns a boolean indicating whether its argument is known to
+ * report that permission is denied. It is satisfied by [ErrPermission] as well
+ * as some syscall errors.
+ *
+ * This function predates [errors.Is]. It only supports errors returned by
+ * the os package. New code should use errors.Is(err, fs.ErrPermission).
+ */
+ (err: Error): boolean
+ }
+ interface isTimeout {
+ /**
+ * IsTimeout returns a boolean indicating whether its argument is known
+ * to report that a timeout occurred.
+ *
+ * This function predates [errors.Is], and the notion of whether an
+ * error indicates a timeout can be ambiguous. For example, the Unix
+ * error EWOULDBLOCK sometimes indicates a timeout and sometimes does not.
+ * New code should use errors.Is with a value appropriate to the call
+ * returning the error, such as [os.ErrDeadlineExceeded].
+ */
+ (err: Error): boolean
+ }
+ interface syscallErrorType extends syscall.Errno{}
+ interface processMode extends Number{}
+ interface processStatus extends Number{}
+ /**
+ * Process stores the information about a process created by [StartProcess].
+ */
+ interface Process {
+ pid: number
+ }
+ /**
+ * ProcAttr holds the attributes that will be applied to a new process
+ * started by StartProcess.
+ */
+ interface ProcAttr {
+ /**
+ * If Dir is non-empty, the child changes into the directory before
+ * creating the process.
+ */
+ dir: string
+ /**
+ * If Env is non-nil, it gives the environment variables for the
+ * new process in the form returned by Environ.
+ * If it is nil, the result of Environ will be used.
+ */
+ env: Array
+ /**
+ * Files specifies the open files inherited by the new process. The
+ * first three entries correspond to standard input, standard output, and
+ * standard error. An implementation may support additional entries,
+ * depending on the underlying operating system. A nil entry corresponds
+ * to that file being closed when the process starts.
+ * On Unix systems, StartProcess will change these File values
+ * to blocking mode, which means that SetDeadline will stop working
+ * and calling Close will not interrupt a Read or Write.
+ */
+ files: Array<(File | undefined)>
+ /**
+ * Operating system-specific process creation attributes.
+ * Note that setting this field means that your program
+ * may not execute properly or even compile on some
+ * operating systems.
+ */
+ sys?: syscall.SysProcAttr
+ }
+ /**
+ * A Signal represents an operating system signal.
+ * The usual underlying implementation is operating system-dependent:
+ * on Unix it is syscall.Signal.
+ */
+ interface Signal {
+ [key:string]: any;
+ string(): string
+ signal(): void // to distinguish from other Stringers
+ }
+ interface getpid {
+ /**
+ * Getpid returns the process id of the caller.
+ */
+ (): number
+ }
+ interface getppid {
+ /**
+ * Getppid returns the process id of the caller's parent.
+ */
+ (): number
+ }
+ interface findProcess {
+ /**
+ * FindProcess looks for a running process by its pid.
+ *
+ * The [Process] it returns can be used to obtain information
+ * about the underlying operating system process.
+ *
+ * On Unix systems, FindProcess always succeeds and returns a Process
+ * for the given pid, regardless of whether the process exists. To test whether
+ * the process actually exists, see whether p.Signal(syscall.Signal(0)) reports
+ * an error.
+ */
+ (pid: number): (Process)
+ }
+ interface startProcess {
+ /**
+ * StartProcess starts a new process with the program, arguments and attributes
+ * specified by name, argv and attr. The argv slice will become [os.Args] in the
+ * new process, so it normally starts with the program name.
+ *
+ * If the calling goroutine has locked the operating system thread
+ * with [runtime.LockOSThread] and modified any inheritable OS-level
+ * thread state (for example, Linux or Plan 9 name spaces), the new
+ * process will inherit the caller's thread state.
+ *
+ * StartProcess is a low-level interface. The [os/exec] package provides
+ * higher-level interfaces.
+ *
+ * If there is an error, it will be of type [*PathError].
+ */
+ (name: string, argv: Array, attr: ProcAttr): (Process)
+ }
+ interface Process {
+ /**
+ * Release releases any resources associated with the [Process] p,
+ * rendering it unusable in the future.
+ * Release only needs to be called if [Process.Wait] is not.
+ */
+ release(): void
+ }
+ interface Process {
+ /**
+ * Kill causes the [Process] to exit immediately. Kill does not wait until
+ * the Process has actually exited. This only kills the Process itself,
+ * not any other processes it may have started.
+ */
+ kill(): void
+ }
+ interface Process {
+ /**
+ * Wait waits for the [Process] to exit, and then returns a
+ * ProcessState describing its status and an error, if any.
+ * Wait releases any resources associated with the Process.
+ * On most operating systems, the Process must be a child
+ * of the current process or an error will be returned.
+ */
+ wait(): (ProcessState)
+ }
+ interface Process {
+ /**
+ * Signal sends a signal to the [Process].
+ * Sending [Interrupt] on Windows is not implemented.
+ */
+ signal(sig: Signal): void
+ }
+ interface ProcessState {
+ /**
+ * UserTime returns the user CPU time of the exited process and its children.
+ */
+ userTime(): time.Duration
+ }
+ interface ProcessState {
+ /**
+ * SystemTime returns the system CPU time of the exited process and its children.
+ */
+ systemTime(): time.Duration
+ }
+ interface ProcessState {
+ /**
+ * Exited reports whether the program has exited.
+ * On Unix systems this reports true if the program exited due to calling exit,
+ * but false if the program terminated due to a signal.
+ */
+ exited(): boolean
+ }
+ interface ProcessState {
+ /**
+ * Success reports whether the program exited successfully,
+ * such as with exit status 0 on Unix.
+ */
+ success(): boolean
+ }
+ interface ProcessState {
+ /**
+ * Sys returns system-dependent exit information about
+ * the process. Convert it to the appropriate underlying
+ * type, such as [syscall.WaitStatus] on Unix, to access its contents.
+ */
+ sys(): any
+ }
+ interface ProcessState {
+ /**
+ * SysUsage returns system-dependent resource usage information about
+ * the exited process. Convert it to the appropriate underlying
+ * type, such as [*syscall.Rusage] on Unix, to access its contents.
+ * (On Unix, *syscall.Rusage matches struct rusage as defined in the
+ * getrusage(2) manual page.)
+ */
+ sysUsage(): any
+ }
+ /**
+ * ProcessState stores information about a process, as reported by Wait.
+ */
+ interface ProcessState {
+ }
+ interface ProcessState {
+ /**
+ * Pid returns the process id of the exited process.
+ */
+ pid(): number
+ }
+ interface ProcessState {
+ string(): string
+ }
+ interface ProcessState {
+ /**
+ * ExitCode returns the exit code of the exited process, or -1
+ * if the process hasn't exited or was terminated by a signal.
+ */
+ exitCode(): number
+ }
+ interface executable {
+ /**
+ * Executable returns the path name for the executable that started
+ * the current process. There is no guarantee that the path is still
+ * pointing to the correct executable. If a symlink was used to start
+ * the process, depending on the operating system, the result might
+ * be the symlink or the path it pointed to. If a stable result is
+ * needed, [path/filepath.EvalSymlinks] might help.
+ *
+ * Executable returns an absolute path unless an error occurred.
+ *
+ * The main use case is finding resources located relative to an
+ * executable.
+ */
+ (): string
+ }
+ interface File {
+ /**
+ * Name returns the name of the file as presented to Open.
+ *
+ * It is safe to call Name after [Close].
+ */
+ name(): string
+ }
+ /**
+ * LinkError records an error during a link or symlink or rename
+ * system call and the paths that caused it.
+ */
+ interface LinkError {
+ op: string
+ old: string
+ new: string
+ err: Error
+ }
+ interface LinkError {
+  /**
+   * Error implements the error interface, describing the failed
+   * link/symlink/rename operation together with the paths involved
+   * (the op, old and new fields declared on LinkError above).
+   */
+  error(): string
+ }
+ interface LinkError {
+  /**
+   * Unwrap returns the underlying error so errors.Is / errors.As can
+   * inspect it.
+   * NOTE(review): the generated return type is void, but Go's
+   * (*LinkError).Unwrap returns an error value — looks like a quirk of
+   * the type generator's error mapping; confirm before relying on it.
+   */
+  unwrap(): void
+ }
+ interface File {
+ /**
+ * Read reads up to len(b) bytes from the File and stores them in b.
+ * It returns the number of bytes read and any error encountered.
+ * At end of file, Read returns 0, io.EOF.
+ */
+ read(b: string|Array): number
+ }
+ interface File {
+ /**
+ * ReadAt reads len(b) bytes from the File starting at byte offset off.
+ * It returns the number of bytes read and the error, if any.
+ * ReadAt always returns a non-nil error when n < len(b).
+ * At end of file, that error is io.EOF.
+ */
+ readAt(b: string|Array, off: number): number
+ }
+ interface File {
+ /**
+ * ReadFrom implements io.ReaderFrom.
+ */
+ readFrom(r: io.Reader): number
+ }
+ /**
+ * noReadFrom can be embedded alongside another type to
+ * hide the ReadFrom method of that other type.
+ */
+ interface noReadFrom {
+ }
+ interface noReadFrom {
+ /**
+ * ReadFrom hides another ReadFrom method.
+ * It should never be called.
+ */
+ readFrom(_arg0: io.Reader): number
+ }
+ /**
+ * fileWithoutReadFrom implements all the methods of *File other
+ * than ReadFrom. This is used to permit ReadFrom to call io.Copy
+ * without leading to a recursive call to ReadFrom.
+ */
+ type _sMfsKhK = noReadFrom&File
+ interface fileWithoutReadFrom extends _sMfsKhK {
+ }
+ interface File {
+ /**
+ * Write writes len(b) bytes from b to the File.
+ * It returns the number of bytes written and an error, if any.
+ * Write returns a non-nil error when n != len(b).
+ */
+ write(b: string|Array): number
+ }
+ interface File {
+ /**
+ * WriteAt writes len(b) bytes to the File starting at byte offset off.
+ * It returns the number of bytes written and an error, if any.
+ * WriteAt returns a non-nil error when n != len(b).
+ *
+ * If file was opened with the O_APPEND flag, WriteAt returns an error.
+ */
+ writeAt(b: string|Array, off: number): number
+ }
+ interface File {
+ /**
+ * WriteTo implements io.WriterTo.
+ */
+ writeTo(w: io.Writer): number
+ }
+ /**
+ * noWriteTo can be embedded alongside another type to
+ * hide the WriteTo method of that other type.
+ */
+ interface noWriteTo {
+ }
+ interface noWriteTo {
+ /**
+ * WriteTo hides another WriteTo method.
+ * It should never be called.
+ */
+ writeTo(_arg0: io.Writer): number
+ }
+ /**
+ * fileWithoutWriteTo implements all the methods of *File other
+ * than WriteTo. This is used to permit WriteTo to call io.Copy
+ * without leading to a recursive call to WriteTo.
+ */
+ type _swCDegm = noWriteTo&File
+ interface fileWithoutWriteTo extends _swCDegm {
+ }
+ interface File {
+ /**
+ * Seek sets the offset for the next Read or Write on file to offset, interpreted
+ * according to whence: 0 means relative to the origin of the file, 1 means
+ * relative to the current offset, and 2 means relative to the end.
+ * It returns the new offset and an error, if any.
+ * The behavior of Seek on a file opened with O_APPEND is not specified.
+ */
+ seek(offset: number, whence: number): number
+ }
+ interface File {
+ /**
+ * WriteString is like Write, but writes the contents of string s rather than
+ * a slice of bytes.
+ */
+ writeString(s: string): number
+ }
+ interface mkdir {
+ /**
+ * Mkdir creates a new directory with the specified name and permission
+ * bits (before umask).
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string, perm: FileMode): void
+ }
+ interface chdir {
+ /**
+ * Chdir changes the current working directory to the named directory.
+ * If there is an error, it will be of type *PathError.
+ */
+ (dir: string): void
+ }
+ interface open {
+ /**
+ * Open opens the named file for reading. If successful, methods on
+ * the returned file can be used for reading; the associated file
+ * descriptor has mode O_RDONLY.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string): (File)
+ }
+ interface create {
+ /**
+ * Create creates or truncates the named file. If the file already exists,
+ * it is truncated. If the file does not exist, it is created with mode 0o666
+ * (before umask). If successful, methods on the returned File can
+ * be used for I/O; the associated file descriptor has mode O_RDWR.
+ * The directory containing the file must already exist.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string): (File)
+ }
+ interface openFile {
+ /**
+ * OpenFile is the generalized open call; most users will use Open
+ * or Create instead. It opens the named file with specified flag
+ * (O_RDONLY etc.). If the file does not exist, and the O_CREATE flag
+ * is passed, it is created with mode perm (before umask);
+ * the containing directory must exist. If successful,
+ * methods on the returned File can be used for I/O.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string, flag: number, perm: FileMode): (File)
+ }
+ interface rename {
+ /**
+ * Rename renames (moves) oldpath to newpath.
+ * If newpath already exists and is not a directory, Rename replaces it.
+ * If newpath already exists and is a directory, Rename returns an error.
+ * OS-specific restrictions may apply when oldpath and newpath are in different directories.
+ * Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
+ * If there is an error, it will be of type *LinkError.
+ */
+ (oldpath: string, newpath: string): void
+ }
+ interface readlink {
+ /**
+ * Readlink returns the destination of the named symbolic link.
+ * If there is an error, it will be of type *PathError.
+ *
+ * If the link destination is relative, Readlink returns the relative path
+ * without resolving it to an absolute one.
+ */
+ (name: string): string
+ }
+ interface tempDir {
+ /**
+ * TempDir returns the default directory to use for temporary files.
+ *
+ * On Unix systems, it returns $TMPDIR if non-empty, else /tmp.
+ * On Windows, it uses GetTempPath, returning the first non-empty
+ * value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
+ * On Plan 9, it returns /tmp.
+ *
+ * The directory is neither guaranteed to exist nor have accessible
+ * permissions.
+ */
+ (): string
+ }
+ interface userCacheDir {
+ /**
+ * UserCacheDir returns the default root directory to use for user-specific
+ * cached data. Users should create their own application-specific subdirectory
+ * within this one and use that.
+ *
+ * On Unix systems, it returns $XDG_CACHE_HOME as specified by
+ * https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
+ * non-empty, else $HOME/.cache.
+ * On Darwin, it returns $HOME/Library/Caches.
+ * On Windows, it returns %LocalAppData%.
+ * On Plan 9, it returns $home/lib/cache.
+ *
+ * If the location cannot be determined (for example, $HOME is not defined) or
+ * the path in $XDG_CACHE_HOME is relative, then it will return an error.
+ */
+ (): string
+ }
+ interface userConfigDir {
+ /**
+ * UserConfigDir returns the default root directory to use for user-specific
+ * configuration data. Users should create their own application-specific
+ * subdirectory within this one and use that.
+ *
+ * On Unix systems, it returns $XDG_CONFIG_HOME as specified by
+ * https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
+ * non-empty, else $HOME/.config.
+ * On Darwin, it returns $HOME/Library/Application Support.
+ * On Windows, it returns %AppData%.
+ * On Plan 9, it returns $home/lib.
+ *
+ * If the location cannot be determined (for example, $HOME is not defined) or
+ * the path in $XDG_CONFIG_HOME is relative, then it will return an error.
+ */
+ (): string
+ }
+ interface userHomeDir {
+ /**
+ * UserHomeDir returns the current user's home directory.
+ *
+ * On Unix, including macOS, it returns the $HOME environment variable.
+ * On Windows, it returns %USERPROFILE%.
+ * On Plan 9, it returns the $home environment variable.
+ *
+ * If the expected variable is not set in the environment, UserHomeDir
+ * returns either a platform-specific default value or a non-nil error.
+ */
+ (): string
+ }
+ interface chmod {
+ /**
+ * Chmod changes the mode of the named file to mode.
+ * If the file is a symbolic link, it changes the mode of the link's target.
+ * If there is an error, it will be of type *PathError.
+ *
+ * A different subset of the mode bits are used, depending on the
+ * operating system.
+ *
+ * On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
+ * ModeSticky are used.
+ *
+ * On Windows, only the 0o200 bit (owner writable) of mode is used; it
+ * controls whether the file's read-only attribute is set or cleared.
+ * The other bits are currently unused. For compatibility with Go 1.12
+ * and earlier, use a non-zero mode. Use mode 0o400 for a read-only
+ * file and 0o600 for a readable+writable file.
+ *
+ * On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
+ * and ModeTemporary are used.
+ */
+ (name: string, mode: FileMode): void
+ }
+ interface File {
+ /**
+ * Chmod changes the mode of the file to mode.
+ * If there is an error, it will be of type *PathError.
+ */
+ chmod(mode: FileMode): void
+ }
+ interface File {
+ /**
+ * SetDeadline sets the read and write deadlines for a File.
+ * It is equivalent to calling both SetReadDeadline and SetWriteDeadline.
+ *
+ * Only some kinds of files support setting a deadline. Calls to SetDeadline
+ * for files that do not support deadlines will return ErrNoDeadline.
+ * On most systems ordinary files do not support deadlines, but pipes do.
+ *
+ * A deadline is an absolute time after which I/O operations fail with an
+ * error instead of blocking. The deadline applies to all future and pending
+ * I/O, not just the immediately following call to Read or Write.
+ * After a deadline has been exceeded, the connection can be refreshed
+ * by setting a deadline in the future.
+ *
+ * If the deadline is exceeded a call to Read or Write or to other I/O
+ * methods will return an error that wraps ErrDeadlineExceeded.
+ * This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
+ * That error implements the Timeout method, and calling the Timeout
+ * method will return true, but there are other possible errors for which
+ * the Timeout will return true even if the deadline has not been exceeded.
+ *
+ * An idle timeout can be implemented by repeatedly extending
+ * the deadline after successful Read or Write calls.
+ *
+ * A zero value for t means I/O operations will not time out.
+ */
+ setDeadline(t: time.Time): void
+ }
+ interface File {
+ /**
+ * SetReadDeadline sets the deadline for future Read calls and any
+ * currently-blocked Read call.
+ * A zero value for t means Read will not time out.
+ * Not all files support setting deadlines; see SetDeadline.
+ */
+ setReadDeadline(t: time.Time): void
+ }
+ interface File {
+ /**
+ * SetWriteDeadline sets the deadline for any future Write calls and any
+ * currently-blocked Write call.
+ * Even if Write times out, it may return n > 0, indicating that
+ * some of the data was successfully written.
+ * A zero value for t means Write will not time out.
+ * Not all files support setting deadlines; see SetDeadline.
+ */
+ setWriteDeadline(t: time.Time): void
+ }
+ interface File {
+ /**
+ * SyscallConn returns a raw file.
+ * This implements the syscall.Conn interface.
+ */
+ syscallConn(): syscall.RawConn
+ }
+ interface dirFS {
+ /**
+ * DirFS returns a file system (an fs.FS) for the tree of files rooted at the directory dir.
+ *
+ * Note that DirFS("/prefix") only guarantees that the Open calls it makes to the
+ * operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the
+ * same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside
+ * the /prefix tree, then using DirFS does not stop the access any more than using
+ * os.Open does. Additionally, the root of the fs.FS returned for a relative path,
+ * DirFS("prefix"), will be affected by later calls to Chdir. DirFS is therefore not
+ * a general substitute for a chroot-style security mechanism when the directory tree
+ * contains arbitrary content.
+ *
+ * Use [Root.FS] to obtain a fs.FS that prevents escapes from the tree via symbolic links.
+ *
+ * The directory dir must not be "".
+ *
+ * The result implements [io/fs.StatFS], [io/fs.ReadFileFS] and
+ * [io/fs.ReadDirFS].
+ */
+ (dir: string): fs.FS
+ }
+ /**
+  * dirFS is the string-backed filesystem type returned by [DirFS]; the
+  * string value is the root directory path.
+  */
+ interface dirFS extends String{}
+ interface dirFS {
+  /**
+   * Open opens the named file relative to the dirFS root directory.
+   * Through this method, dirFS implements [io/fs.FS].
+   */
+  open(name: string): fs.File
+ }
+ interface dirFS {
+ /**
+ * The ReadFile method calls the [ReadFile] function for the file
+ * with the given name in the directory. The function provides
+ * robust handling for small files and special file systems.
+ * Through this method, dirFS implements [io/fs.ReadFileFS].
+ */
+ readFile(name: string): string|Array
+ }
+ interface dirFS {
+ /**
+ * ReadDir reads the named directory, returning all its directory entries sorted
+ * by filename. Through this method, dirFS implements [io/fs.ReadDirFS].
+ */
+ readDir(name: string): Array
+ }
+ interface dirFS {
+  /**
+   * Stat returns a [fs.FileInfo] describing the named file relative to
+   * the dirFS root. Through this method, dirFS implements [io/fs.StatFS].
+   */
+  stat(name: string): fs.FileInfo
+ }
+ interface readFile {
+ /**
+ * ReadFile reads the named file and returns the contents.
+ * A successful call returns err == nil, not err == EOF.
+ * Because ReadFile reads the whole file, it does not treat an EOF from Read
+ * as an error to be reported.
+ */
+ (name: string): string|Array
+ }
+ interface writeFile {
+ /**
+ * WriteFile writes data to the named file, creating it if necessary.
+ * If the file does not exist, WriteFile creates it with permissions perm (before umask);
+ * otherwise WriteFile truncates it before writing, without changing permissions.
+ * Since WriteFile requires multiple system calls to complete, a failure mid-operation
+ * can leave the file in a partially written state.
+ */
+ (name: string, data: string|Array, perm: FileMode): void
+ }
+ interface File {
+ /**
+ * Close closes the [File], rendering it unusable for I/O.
+ * On files that support [File.SetDeadline], any pending I/O operations will
+ * be canceled and return immediately with an [ErrClosed] error.
+ * Close will return an error if it has already been called.
+ */
+ close(): void
+ }
+ interface chown {
+ /**
+ * Chown changes the numeric uid and gid of the named file.
+ * If the file is a symbolic link, it changes the uid and gid of the link's target.
+ * A uid or gid of -1 means to not change that value.
+ * If there is an error, it will be of type [*PathError].
+ *
+ * On Windows or Plan 9, Chown always returns the [syscall.EWINDOWS] or
+ * EPLAN9 error, wrapped in *PathError.
+ */
+ (name: string, uid: number, gid: number): void
+ }
+ interface lchown {
+ /**
+ * Lchown changes the numeric uid and gid of the named file.
+ * If the file is a symbolic link, it changes the uid and gid of the link itself.
+ * If there is an error, it will be of type [*PathError].
+ *
+ * On Windows, it always returns the [syscall.EWINDOWS] error, wrapped
+ * in *PathError.
+ */
+ (name: string, uid: number, gid: number): void
+ }
+ interface File {
+ /**
+ * Chown changes the numeric uid and gid of the named file.
+ * If there is an error, it will be of type [*PathError].
+ *
+ * On Windows, it always returns the [syscall.EWINDOWS] error, wrapped
+ * in *PathError.
+ */
+ chown(uid: number, gid: number): void
+ }
+ interface File {
+ /**
+ * Truncate changes the size of the file.
+ * It does not change the I/O offset.
+ * If there is an error, it will be of type [*PathError].
+ */
+ truncate(size: number): void
+ }
+ interface File {
+ /**
+ * Sync commits the current contents of the file to stable storage.
+ * Typically, this means flushing the file system's in-memory copy
+ * of recently written data to disk.
+ */
+ sync(): void
+ }
+ interface chtimes {
+ /**
+ * Chtimes changes the access and modification times of the named
+ * file, similar to the Unix utime() or utimes() functions.
+ * A zero [time.Time] value will leave the corresponding file time unchanged.
+ *
+ * The underlying filesystem may truncate or round the values to a
+ * less precise time unit.
+ * If there is an error, it will be of type [*PathError].
+ */
+ (name: string, atime: time.Time, mtime: time.Time): void
+ }
+ interface File {
+ /**
+ * Chdir changes the current working directory to the file,
+ * which must be a directory.
+ * If there is an error, it will be of type [*PathError].
+ */
+ chdir(): void
+ }
+ /**
+ * file is the real representation of *File.
+ * The extra level of indirection ensures that no clients of os
+ * can overwrite this data, which could cause the finalizer
+ * to close the wrong file descriptor.
+ */
+ interface file {
+ }
+ interface File {
+ /**
+ * Fd returns the integer Unix file descriptor referencing the open file.
+ * If f is closed, the file descriptor becomes invalid.
+ * If f is garbage collected, a finalizer may close the file descriptor,
+ * making it invalid; see [runtime.SetFinalizer] for more information on when
+ * a finalizer might be run. On Unix systems this will cause the [File.SetDeadline]
+ * methods to stop working.
+ * Because file descriptors can be reused, the returned file descriptor may
+ * only be closed through the [File.Close] method of f, or by its finalizer during
+ * garbage collection. Otherwise, during garbage collection the finalizer
+ * may close an unrelated file descriptor with the same (reused) number.
+ *
+ * As an alternative, see the f.SyscallConn method.
+ */
+ fd(): number
+ }
+ interface newFile {
+ /**
+ * NewFile returns a new File with the given file descriptor and
+ * name. The returned value will be nil if fd is not a valid file
+ * descriptor. On Unix systems, if the file descriptor is in
+ * non-blocking mode, NewFile will attempt to return a pollable File
+ * (one for which the SetDeadline methods work).
+ *
+ * After passing it to NewFile, fd may become invalid under the same
+ * conditions described in the comments of the Fd method, and the same
+ * constraints apply.
+ */
+ (fd: number, name: string): (File)
+ }
+ /**
+ * newFileKind describes the kind of file to newFile.
+ */
+ interface newFileKind extends Number{}
+ interface truncate {
+ /**
+ * Truncate changes the size of the named file.
+ * If the file is a symbolic link, it changes the size of the link's target.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string, size: number): void
+ }
+ interface remove {
+ /**
+ * Remove removes the named file or (empty) directory.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string): void
+ }
+ interface link {
+ /**
+ * Link creates newname as a hard link to the oldname file.
+ * If there is an error, it will be of type *LinkError.
+ */
+ (oldname: string, newname: string): void
+ }
+ interface symlink {
+ /**
+ * Symlink creates newname as a symbolic link to oldname.
+ * On Windows, a symlink to a non-existent oldname creates a file symlink;
+ * if oldname is later created as a directory the symlink will not work.
+ * If there is an error, it will be of type *LinkError.
+ */
+ (oldname: string, newname: string): void
+ }
+ /**
+  * unixDirent is a single directory entry as produced by the
+  * platform-specific directory-reading code. Its method set
+  * (Name/IsDir/Type/Info) matches [fs.DirEntry].
+  */
+ interface unixDirent {
+ }
+ interface unixDirent {
+  /**
+   * Name returns the base name of the directory entry.
+   */
+  name(): string
+ }
+ interface unixDirent {
+  /**
+   * IsDir reports whether the entry describes a directory.
+   */
+  isDir(): boolean
+ }
+ interface unixDirent {
+  /**
+   * Type returns the type bits of the entry's [FileMode]
+   * (the mode with the permission bits masked out).
+   */
+  type(): FileMode
+ }
+ interface unixDirent {
+  /**
+   * Info returns the [FileInfo] for the file the entry describes.
+   */
+  info(): FileInfo
+ }
+ interface unixDirent {
+  /**
+   * String returns a human-readable description of the entry,
+   * presumably via [fs.FormatDirEntry] — generated binding, confirm
+   * against the Go source.
+   */
+  string(): string
+ }
+ interface getwd {
+ /**
+ * Getwd returns an absolute path name corresponding to the
+ * current directory. If the current directory can be
+ * reached via multiple paths (due to symbolic links),
+ * Getwd may return any one of them.
+ *
+ * On Unix platforms, if the environment variable PWD
+ * provides an absolute name, and it is a name of the
+ * current directory, it is returned.
+ */
+ (): string
+ }
+ interface mkdirAll {
+ /**
+ * MkdirAll creates a directory named path,
+ * along with any necessary parents, and returns nil,
+ * or else returns an error.
+ * The permission bits perm (before umask) are used for all
+ * directories that MkdirAll creates.
+ * If path is already a directory, MkdirAll does nothing
+ * and returns nil.
+ */
+ (path: string, perm: FileMode): void
+ }
+ interface removeAll {
+ /**
+ * RemoveAll removes path and any children it contains.
+ * It removes everything it can but returns the first error
+ * it encounters. If the path does not exist, RemoveAll
+ * returns nil (no error).
+ * If there is an error, it will be of type [*PathError].
+ */
+ (path: string): void
+ }
+ interface isPathSeparator {
+ /**
+ * IsPathSeparator reports whether c is a directory separator character.
+ */
+ (c: number): boolean
+ }
+ interface pipe {
+ /**
+ * Pipe returns a connected pair of Files; reads from r return bytes written to w.
+ * It returns the files and an error, if any.
+ */
+ (): [(File), (File)]
+ }
+ interface getuid {
+ /**
+ * Getuid returns the numeric user id of the caller.
+ *
+ * On Windows, it returns -1.
+ */
+ (): number
+ }
+ interface geteuid {
+ /**
+ * Geteuid returns the numeric effective user id of the caller.
+ *
+ * On Windows, it returns -1.
+ */
+ (): number
+ }
+ interface getgid {
+ /**
+ * Getgid returns the numeric group id of the caller.
+ *
+ * On Windows, it returns -1.
+ */
+ (): number
+ }
+ interface getegid {
+ /**
+ * Getegid returns the numeric effective group id of the caller.
+ *
+ * On Windows, it returns -1.
+ */
+ (): number
+ }
+ interface getgroups {
+ /**
+ * Getgroups returns a list of the numeric ids of groups that the caller belongs to.
+ *
+ * On Windows, it returns [syscall.EWINDOWS]. See the [os/user] package
+ * for a possible alternative.
+ */
+ (): Array
+ }
+ interface exit {
+ /**
+ * Exit causes the current program to exit with the given status code.
+ * Conventionally, code zero indicates success, non-zero an error.
+ * The program terminates immediately; deferred functions are not run.
+ *
+ * For portability, the status code should be in the range [0, 125].
+ */
+ (code: number): void
+ }
+ /**
+ * rawConn implements syscall.RawConn.
+ */
+ interface rawConn {
+ }
+ interface rawConn {
+  /**
+   * Control invokes f with the underlying file descriptor, per the
+   * [syscall.RawConn] contract (see the rawConn doc comment above).
+   */
+  control(f: (_arg0: number) => void): void
+ }
+ interface rawConn {
+  /**
+   * Read invokes f with the file descriptor when it is ready for
+   * reading, repeating until f returns true ([syscall.RawConn]).
+   */
+  read(f: (_arg0: number) => boolean): void
+ }
+ interface rawConn {
+  /**
+   * Write invokes f with the file descriptor when it is ready for
+   * writing, repeating until f returns true ([syscall.RawConn]).
+   */
+  write(f: (_arg0: number) => boolean): void
+ }
+ interface openInRoot {
+ /**
+ * OpenInRoot opens the file name in the directory dir.
+ * It is equivalent to OpenRoot(dir) followed by opening the file in the root.
+ *
+ * OpenInRoot returns an error if any component of the name
+ * references a location outside of dir.
+ *
+ * See [Root] for details and limitations.
+ */
+ (dir: string, name: string): (File)
+ }
+ /**
+ * Root may be used to only access files within a single directory tree.
+ *
+ * Methods on Root can only access files and directories beneath a root directory.
+ * If any component of a file name passed to a method of Root references a location
+ * outside the root, the method returns an error.
+ * File names may reference the directory itself (.).
+ *
+ * Methods on Root will follow symbolic links, but symbolic links may not
+ * reference a location outside the root.
+ * Symbolic links must not be absolute.
+ *
+ * Methods on Root do not prohibit traversal of filesystem boundaries,
+ * Linux bind mounts, /proc special files, or access to Unix device files.
+ *
+ * Methods on Root are safe to be used from multiple goroutines simultaneously.
+ *
+ * On most platforms, creating a Root opens a file descriptor or handle referencing
+ * the directory. If the directory is moved, methods on Root reference the original
+ * directory in its new location.
+ *
+ * Root's behavior differs on some platforms:
+ *
+ * ```
+ * - When GOOS=windows, file names may not reference Windows reserved device names
+ * such as NUL and COM1.
+ * - When GOOS=js, Root is vulnerable to TOCTOU (time-of-check-time-of-use)
+ * attacks in symlink validation, and cannot ensure that operations will not
+ * escape the root.
+ * - When GOOS=plan9 or GOOS=js, Root does not track directories across renames.
+ * On these platforms, a Root references a directory name, not a file descriptor.
+ * ```
+ */
+ interface Root {
+ }
+ interface openRoot {
+ /**
+ * OpenRoot opens the named directory.
+ * If there is an error, it will be of type *PathError.
+ */
+ (name: string): (Root)
+ }
+ interface Root {
+ /**
+ * Name returns the name of the directory presented to OpenRoot.
+ *
+ * It is safe to call Name after [Close].
+ */
+ name(): string
+ }
+ interface Root {
+ /**
+ * Close closes the Root.
+ * After Close is called, methods on Root return errors.
+ */
+ close(): void
+ }
+ interface Root {
+ /**
+ * Open opens the named file in the root for reading.
+ * See [Open] for more details.
+ */
+ open(name: string): (File)
+ }
+ interface Root {
+ /**
+ * Create creates or truncates the named file in the root.
+ * See [Create] for more details.
+ */
+ create(name: string): (File)
+ }
+ interface Root {
+ /**
+ * OpenFile opens the named file in the root.
+ * See [OpenFile] for more details.
+ *
+ * If perm contains bits other than the nine least-significant bits (0o777),
+ * OpenFile returns an error.
+ */
+ openFile(name: string, flag: number, perm: FileMode): (File)
+ }
+ interface Root {
+ /**
+ * OpenRoot opens the named directory in the root.
+ * If there is an error, it will be of type *PathError.
+ */
+ openRoot(name: string): (Root)
+ }
+ interface Root {
+ /**
+ * Mkdir creates a new directory in the root
+ * with the specified name and permission bits (before umask).
+ * See [Mkdir] for more details.
+ *
+ * If perm contains bits other than the nine least-significant bits (0o777),
+ * OpenFile returns an error.
+ */
+ mkdir(name: string, perm: FileMode): void
+ }
+ interface Root {
+ /**
+ * Remove removes the named file or (empty) directory in the root.
+ * See [Remove] for more details.
+ */
+ remove(name: string): void
+ }
+ interface Root {
+ /**
+ * Stat returns a [FileInfo] describing the named file in the root.
+ * See [Stat] for more details.
+ */
+ stat(name: string): FileInfo
+ }
+ interface Root {
+ /**
+ * Lstat returns a [FileInfo] describing the named file in the root.
+ * If the file is a symbolic link, the returned FileInfo
+ * describes the symbolic link.
+ * See [Lstat] for more details.
+ */
+ lstat(name: string): FileInfo
+ }
+ interface Root {
+ /**
+ * FS returns a file system (an fs.FS) for the tree of files in the root.
+ *
+ * The result implements [io/fs.StatFS], [io/fs.ReadFileFS] and
+ * [io/fs.ReadDirFS].
+ */
+ fs(): fs.FS
+ }
+ /**
+  * rootFS adapts a [Root] to the fs interfaces; it is the concrete type
+  * behind [Root.FS]. Its method set matches [io/fs.FS], [io/fs.ReadDirFS],
+  * [io/fs.ReadFileFS] and [io/fs.StatFS].
+  */
+ interface rootFS extends Root{}
+ interface rootFS {
+  /**
+   * Open opens the named file within the root, implementing [io/fs.FS].
+   */
+  open(name: string): fs.File
+ }
+ interface rootFS {
+  /**
+   * ReadDir reads the named directory within the root, implementing
+   * [io/fs.ReadDirFS].
+   */
+  readDir(name: string): Array
+ }
+ interface rootFS {
+  /**
+   * ReadFile reads the named file within the root and returns its
+   * contents, implementing [io/fs.ReadFileFS].
+   */
+  readFile(name: string): string|Array
+ }
+ interface rootFS {
+  /**
+   * Stat returns a [FileInfo] for the named file within the root,
+   * implementing [io/fs.StatFS].
+   */
+  stat(name: string): FileInfo
+ }
+ /**
+ * root implementation for platforms with a function to open a file
+ * relative to a directory.
+ */
+ interface root {
+ }
+ interface root {
+  /**
+   * Close releases the resources held for the root directory —
+   * presumably the file descriptor/handle mentioned in the [Root] doc
+   * comment; confirm against the platform implementation.
+   */
+  close(): void
+ }
+ interface root {
+  /**
+   * Name returns the directory name this root was opened with.
+   */
+  name(): string
+ }
+ /**
+ * errSymlink reports that a file being operated on is actually a symlink,
+ * and the target of that symlink.
+ */
+ interface errSymlink extends String{}
+ interface errSymlink {
+ error(): string
+ }
+ interface sysfdType extends Number{}
+ interface stat {
+ /**
+ * Stat returns a [FileInfo] describing the named file.
+ * If there is an error, it will be of type [*PathError].
+ */
+ (name: string): FileInfo
+ }
+ interface lstat {
+ /**
+ * Lstat returns a [FileInfo] describing the named file.
+ * If the file is a symbolic link, the returned FileInfo
+ * describes the symbolic link. Lstat makes no attempt to follow the link.
+ * If there is an error, it will be of type [*PathError].
+ *
+ * On Windows, if the file is a reparse point that is a surrogate for another
+ * named entity (such as a symbolic link or mounted folder), the returned
+ * FileInfo describes the reparse point, and makes no attempt to resolve it.
+ */
+ (name: string): FileInfo
+ }
+ interface File {
+ /**
+ * Stat returns the [FileInfo] structure describing file.
+ * If there is an error, it will be of type [*PathError].
+ */
+ stat(): FileInfo
+ }
+ interface hostname {
+ /**
+ * Hostname returns the host name reported by the kernel.
+ */
+ (): string
+ }
+ interface createTemp {
+ /**
+ * CreateTemp creates a new temporary file in the directory dir,
+ * opens the file for reading and writing, and returns the resulting file.
+ * The filename is generated by taking pattern and adding a random string to the end.
+ * If pattern includes a "*", the random string replaces the last "*".
+ * The file is created with mode 0o600 (before umask).
+ * If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by [TempDir].
+ * Multiple programs or goroutines calling CreateTemp simultaneously will not choose the same file.
+ * The caller can use the file's Name method to find the pathname of the file.
+ * It is the caller's responsibility to remove the file when it is no longer needed.
+ */
+ (dir: string, pattern: string): (File)
+ }
+ interface mkdirTemp {
+ /**
+ * MkdirTemp creates a new temporary directory in the directory dir
+ * and returns the pathname of the new directory.
+ * The new directory's name is generated by adding a random string to the end of pattern.
+ * If pattern includes a "*", the random string replaces the last "*" instead.
+ * The directory is created with mode 0o700 (before umask).
+ * If dir is the empty string, MkdirTemp uses the default directory for temporary files, as returned by TempDir.
+ * Multiple programs or goroutines calling MkdirTemp simultaneously will not choose the same directory.
+ * It is the caller's responsibility to remove the directory when it is no longer needed.
+ */
+ (dir: string, pattern: string): string
+ }
+ interface getpagesize {
+ /**
+ * Getpagesize returns the underlying system's memory page size.
+ */
+ (): number
+ }
+ /**
+ * File represents an open file descriptor.
+ *
+ * The methods of File are safe for concurrent use.
+ */
+ type _sPrVXpP = file
+ interface File extends _sPrVXpP {
+ }
+ /**
+ * A FileInfo describes a file and is returned by [Stat] and [Lstat].
+ */
+ interface FileInfo extends fs.FileInfo{}
+ /**
+ * A FileMode represents a file's mode and permission bits.
+ * The bits have the same definition on all systems, so that
+ * information about files can be moved from one system
+ * to another portably. Not all bits apply to all systems.
+ * The only required bit is [ModeDir] for directories.
+ */
+ interface FileMode extends fs.FileMode{}
+ interface fileStat {
+  /**
+   * Name returns the base name of the file (part of the [fs.FileInfo]
+   * contract that fileStat implements).
+   */
+  name(): string
+ }
+ interface fileStat {
+  /**
+   * IsDir reports whether the file describes a directory
+   * (equivalent to mode().isDir()).
+   */
+  isDir(): boolean
+ }
+ interface sameFile {
+ /**
+ * SameFile reports whether fi1 and fi2 describe the same file.
+ * For example, on Unix this means that the device and inode fields
+ * of the two underlying structures are identical; on other systems
+ * the decision may be based on the path names.
+ * SameFile only applies to results returned by this package's [Stat].
+ * It returns false in other cases.
+ */
+ (fi1: FileInfo, fi2: FileInfo): boolean
+ }
+ /**
+ * A fileStat is the implementation of FileInfo returned by Stat and Lstat.
+ */
+ interface fileStat {
+ }
+ interface fileStat {
+ size(): number
+ }
+ interface fileStat {
+ mode(): FileMode
+ }
+ interface fileStat {
+ modTime(): time.Time
+ }
+ interface fileStat {
+ sys(): any
+ }
+}
+
+/**
+ * Package filepath implements utility routines for manipulating filename paths
+ * in a way compatible with the target operating system-defined file paths.
+ *
+ * The filepath package uses either forward slashes or backslashes,
+ * depending on the operating system. To process paths such as URLs
+ * that always use forward slashes regardless of the operating
+ * system, see the [path] package.
+ */
+namespace filepath {
+ interface match {
+ /**
+ * Match reports whether name matches the shell file name pattern.
+ * The pattern syntax is:
+ *
+ * ```
+ * pattern:
+ * { term }
+ * term:
+ * '*' matches any sequence of non-Separator characters
+ * '?' matches any single non-Separator character
+ * '[' [ '^' ] { character-range } ']'
+ * character class (must be non-empty)
+ * c matches character c (c != '*', '?', '\\', '[')
+ * '\\' c matches character c
+ *
+ * character-range:
+ * c matches character c (c != '\\', '-', ']')
+ * '\\' c matches character c
+ * lo '-' hi matches character c for lo <= c <= hi
+ * ```
+ *
+ * Match requires pattern to match all of name, not just a substring.
+ * The only possible returned error is [ErrBadPattern], when pattern
+ * is malformed.
+ *
+ * On Windows, escaping is disabled. Instead, '\\' is treated as
+ * path separator.
+ */
+ (pattern: string, name: string): boolean
+ }
+ interface glob {
+ /**
+ * Glob returns the names of all files matching pattern or nil
+ * if there is no matching file. The syntax of patterns is the same
+ * as in [Match]. The pattern may describe hierarchical names such as
+ * /usr/*\/bin/ed (assuming the [Separator] is '/').
+ *
+ * Glob ignores file system errors such as I/O errors reading directories.
+ * The only possible returned error is [ErrBadPattern], when pattern
+ * is malformed.
+ */
+ (pattern: string): Array<string>
+ }
+ interface clean {
+ /**
+ * Clean returns the shortest path name equivalent to path
+ * by purely lexical processing. It applies the following rules
+ * iteratively until no further processing can be done:
+ *
+ * 1. Replace multiple [Separator] elements with a single one.
+ * 2. Eliminate each . path name element (the current directory).
+ * 3. Eliminate each inner .. path name element (the parent directory)
+ * ```
+ * along with the non-.. element that precedes it.
+ * ```
+ * 4. Eliminate .. elements that begin a rooted path:
+ * ```
+ * that is, replace "/.." by "/" at the beginning of a path,
+ * assuming Separator is '/'.
+ * ```
+ *
+ * The returned path ends in a slash only if it represents a root directory,
+ * such as "/" on Unix or `C:\` on Windows.
+ *
+ * Finally, any occurrences of slash are replaced by Separator.
+ *
+ * If the result of this process is an empty string, Clean
+ * returns the string ".".
+ *
+ * On Windows, Clean does not modify the volume name other than to replace
+ * occurrences of "/" with `\`.
+ * For example, Clean("//host/share/../x") returns `\\host\share\x`.
+ *
+ * See also Rob Pike, “Lexical File Names in Plan 9 or
+ * Getting Dot-Dot Right,”
+ * https://9p.io/sys/doc/lexnames.html
+ */
+ (path: string): string
+ }
+ interface isLocal {
+ /**
+ * IsLocal reports whether path, using lexical analysis only, has all of these properties:
+ *
+ * ```
+ * - is within the subtree rooted at the directory in which path is evaluated
+ * - is not an absolute path
+ * - is not empty
+ * - on Windows, is not a reserved name such as "NUL"
+ * ```
+ *
+ * If IsLocal(path) returns true, then
+ * Join(base, path) will always produce a path contained within base and
+ * Clean(path) will always produce an unrooted path with no ".." path elements.
+ *
+ * IsLocal is a purely lexical operation.
+ * In particular, it does not account for the effect of any symbolic links
+ * that may exist in the filesystem.
+ */
+ (path: string): boolean
+ }
+ interface localize {
+ /**
+ * Localize converts a slash-separated path into an operating system path.
+ * The input path must be a valid path as reported by [io/fs.ValidPath].
+ *
+ * Localize returns an error if the path cannot be represented by the operating system.
+ * For example, the path a\b is rejected on Windows, on which \ is a separator
+ * character and cannot be part of a filename.
+ *
+ * The path returned by Localize will always be local, as reported by IsLocal.
+ */
+ (path: string): string
+ }
+ interface toSlash {
+ /**
+ * ToSlash returns the result of replacing each separator character
+ * in path with a slash ('/') character. Multiple separators are
+ * replaced by multiple slashes.
+ */
+ (path: string): string
+ }
+ interface fromSlash {
+ /**
+ * FromSlash returns the result of replacing each slash ('/') character
+ * in path with a separator character. Multiple slashes are replaced
+ * by multiple separators.
+ *
+ * See also the Localize function, which converts a slash-separated path
+ * as used by the io/fs package to an operating system path.
+ */
+ (path: string): string
+ }
+ interface splitList {
+ /**
+ * SplitList splits a list of paths joined by the OS-specific [ListSeparator],
+ * usually found in PATH or GOPATH environment variables.
+ * Unlike strings.Split, SplitList returns an empty slice when passed an empty
+ * string.
+ */
+ (path: string): Array<string>
+ }
+ interface split {
+ /**
+ * Split splits path immediately following the final [Separator],
+ * separating it into a directory and file name component.
+ * If there is no Separator in path, Split returns an empty dir
+ * and file set to path.
+ * The returned values have the property that path = dir+file.
+ */
+ (path: string): [string, string]
+ }
+ interface join {
+ /**
+ * Join joins any number of path elements into a single path,
+ * separating them with an OS specific [Separator]. Empty elements
+ * are ignored. The result is Cleaned. However, if the argument
+ * list is empty or all its elements are empty, Join returns
+ * an empty string.
+ * On Windows, the result will only be a UNC path if the first
+ * non-empty element is a UNC path.
+ */
+ (...elem: string[]): string
+ }
+ interface ext {
+ /**
+ * Ext returns the file name extension used by path.
+ * The extension is the suffix beginning at the final dot
+ * in the final element of path; it is empty if there is
+ * no dot.
+ */
+ (path: string): string
+ }
+ interface evalSymlinks {
+ /**
+ * EvalSymlinks returns the path name after the evaluation of any symbolic
+ * links.
+ * If path is relative the result will be relative to the current directory,
+ * unless one of the components is an absolute symbolic link.
+ * EvalSymlinks calls [Clean] on the result.
+ */
+ (path: string): string
+ }
+ interface isAbs {
+ /**
+ * IsAbs reports whether the path is absolute.
+ */
+ (path: string): boolean
+ }
+ interface abs {
+ /**
+ * Abs returns an absolute representation of path.
+ * If the path is not absolute it will be joined with the current
+ * working directory to turn it into an absolute path. The absolute
+ * path name for a given file is not guaranteed to be unique.
+ * Abs calls [Clean] on the result.
+ */
+ (path: string): string
+ }
+ interface rel {
+ /**
+ * Rel returns a relative path that is lexically equivalent to targpath when
+ * joined to basepath with an intervening separator. That is,
+ * [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
+ * On success, the returned path will always be relative to basepath,
+ * even if basepath and targpath share no elements.
+ * An error is returned if targpath can't be made relative to basepath or if
+ * knowing the current working directory would be necessary to compute it.
+ * Rel calls [Clean] on the result.
+ */
+ (basepath: string, targpath: string): string
+ }
+ /**
+ * WalkFunc is the type of the function called by [Walk] to visit each
+ * file or directory.
+ *
+ * The path argument contains the argument to Walk as a prefix.
+ * That is, if Walk is called with root argument "dir" and finds a file
+ * named "a" in that directory, the walk function will be called with
+ * argument "dir/a".
+ *
+ * The directory and file are joined with Join, which may clean the
+ * directory name: if Walk is called with the root argument "x/../dir"
+ * and finds a file named "a" in that directory, the walk function will
+ * be called with argument "dir/a", not "x/../dir/a".
+ *
+ * The info argument is the fs.FileInfo for the named path.
+ *
+ * The error result returned by the function controls how Walk continues.
+ * If the function returns the special value [SkipDir], Walk skips the
+ * current directory (path if info.IsDir() is true, otherwise path's
+ * parent directory). If the function returns the special value [SkipAll],
+ * Walk skips all remaining files and directories. Otherwise, if the function
+ * returns a non-nil error, Walk stops entirely and returns that error.
+ *
+ * The err argument reports an error related to path, signaling that Walk
+ * will not walk into that directory. The function can decide how to
+ * handle that error; as described earlier, returning the error will
+ * cause Walk to stop walking the entire tree.
+ *
+ * Walk calls the function with a non-nil err argument in two cases.
+ *
+ * First, if an [os.Lstat] on the root directory or any directory or file
+ * in the tree fails, Walk calls the function with path set to that
+ * directory or file's path, info set to nil, and err set to the error
+ * from os.Lstat.
+ *
+ * Second, if a directory's Readdirnames method fails, Walk calls the
+ * function with path set to the directory's path, info, set to an
+ * [fs.FileInfo] describing the directory, and err set to the error from
+ * Readdirnames.
+ */
+ interface WalkFunc {(path: string, info: fs.FileInfo, err: Error): void }
+ interface walkDir {
+ /**
+ * WalkDir walks the file tree rooted at root, calling fn for each file or
+ * directory in the tree, including root.
+ *
+ * All errors that arise visiting files and directories are filtered by fn:
+ * see the [fs.WalkDirFunc] documentation for details.
+ *
+ * The files are walked in lexical order, which makes the output deterministic
+ * but requires WalkDir to read an entire directory into memory before proceeding
+ * to walk that directory.
+ *
+ * WalkDir does not follow symbolic links.
+ *
+ * WalkDir calls fn with paths that use the separator character appropriate
+ * for the operating system. This is unlike [io/fs.WalkDir], which always
+ * uses slash separated paths.
+ */
+ (root: string, fn: fs.WalkDirFunc): void
+ }
+ interface walk {
+ /**
+ * Walk walks the file tree rooted at root, calling fn for each file or
+ * directory in the tree, including root.
+ *
+ * All errors that arise visiting files and directories are filtered by fn:
+ * see the [WalkFunc] documentation for details.
+ *
+ * The files are walked in lexical order, which makes the output deterministic
+ * but requires Walk to read an entire directory into memory before proceeding
+ * to walk that directory.
+ *
+ * Walk does not follow symbolic links.
+ *
+ * Walk is less efficient than [WalkDir], introduced in Go 1.16,
+ * which avoids calling os.Lstat on every visited file or directory.
+ */
+ (root: string, fn: WalkFunc): void
+ }
+ interface base {
+ /**
+ * Base returns the last element of path.
+ * Trailing path separators are removed before extracting the last element.
+ * If the path is empty, Base returns ".".
+ * If the path consists entirely of separators, Base returns a single separator.
+ */
+ (path: string): string
+ }
+ interface dir {
+ /**
+ * Dir returns all but the last element of path, typically the path's directory.
+ * After dropping the final element, Dir calls [Clean] on the path and trailing
+ * slashes are removed.
+ * If the path is empty, Dir returns ".".
+ * If the path consists entirely of separators, Dir returns a single separator.
+ * The returned path does not end in a separator unless it is the root directory.
+ */
+ (path: string): string
+ }
+ interface volumeName {
+ /**
+ * VolumeName returns leading volume name.
+ * Given "C:\foo\bar" it returns "C:" on Windows.
+ * Given "\\host\share\foo" it returns "\\host\share".
+ * On other platforms it returns "".
+ */
+ (path: string): string
+ }
+ interface hasPrefix {
+ /**
+ * HasPrefix exists for historical compatibility and should not be used.
+ *
+ * Deprecated: HasPrefix does not respect path boundaries and
+ * does not ignore case when required.
+ */
+ (p: string, prefix: string): boolean
+ }
+}
+
+namespace security {
+ interface s256Challenge {
+ /**
+ * S256Challenge creates base64 encoded sha256 challenge string derived from code.
+ * The padding of the result base64 string is stripped per [RFC 7636].
+ *
+ * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
+ */
+ (code: string): string
+ }
+ interface md5 {
+ /**
+ * MD5 creates md5 hash from the provided plain text.
+ */
+ (text: string): string
+ }
+ interface sha256 {
+ /**
+ * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text.
+ */
+ (text: string): string
+ }
+ interface sha512 {
+ /**
+ * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text.
+ */
+ (text: string): string
+ }
+ interface hs256 {
+ /**
+ * HS256 creates a HMAC hash with sha256 digest algorithm.
+ */
+ (text: string, secret: string): string
+ }
+ interface hs512 {
+ /**
+ * HS512 creates a HMAC hash with sha512 digest algorithm.
+ */
+ (text: string, secret: string): string
+ }
+ interface equal {
+ /**
+ * Equal compares two hash strings for equality without leaking timing information.
+ */
+ (hash1: string, hash2: string): boolean
+ }
+ // @ts-ignore
+ import crand = rand
+ interface encrypt {
+ /**
+ * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key).
+ *
+ * This method uses AES-256-GCM block cipher mode.
+ */
+ (data: string|Array, key: string): string
+ }
+ interface decrypt {
+ /**
+ * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key).
+ *
+ * This method uses AES-256-GCM block cipher mode.
+ */
+ (cipherText: string, key: string): string|Array
+ }
+ interface parseUnverifiedJWT {
+ /**
+ * ParseUnverifiedJWT parses JWT and returns its claims
+ * but DOES NOT verify the signature.
+ *
+ * It verifies only the exp, iat and nbf claims.
+ */
+ (token: string): jwt.MapClaims
+ }
+ interface parseJWT {
+ /**
+ * ParseJWT verifies and parses JWT and returns its claims.
+ */
+ (token: string, verificationKey: string): jwt.MapClaims
+ }
+ interface newJWT {
+ /**
+ * NewJWT generates and returns new HS256 signed JWT.
+ */
+ (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string
+ }
+ // @ts-ignore
+ import cryptoRand = rand
+ // @ts-ignore
+ import mathRand = rand
+ interface randomString {
+ /**
+ * RandomString generates a cryptographically random string with the specified length.
+ *
+ * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
+ */
+ (length: number): string
+ }
+ interface randomStringWithAlphabet {
+ /**
+ * RandomStringWithAlphabet generates a cryptographically random string
+ * with the specified length and character set.
+ *
+ * It panics if for some reason rand.Int returns a non-nil error.
+ */
+ (length: number, alphabet: string): string
+ }
+ interface pseudorandomString {
+ /**
+ * PseudorandomString generates a pseudorandom string with the specified length.
+ *
+ * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
+ *
+ * For a cryptographically random string (but a little bit slower) use RandomString instead.
+ */
+ (length: number): string
+ }
+ interface pseudorandomStringWithAlphabet {
+ /**
+ * PseudorandomStringWithAlphabet generates a pseudorandom string
+ * with the specified length and character set.
+ *
+ * For a cryptographically random string (but a little bit slower) use RandomStringWithAlphabet instead.
+ */
+ (length: number, alphabet: string): string
+ }
+ interface randomStringByRegex {
+ /**
+ * RandomStringByRegex generates a random string matching the regex pattern.
+ * If optFlags is not set, falls back to [syntax.Perl].
+ *
+ * NB! While the source of the randomness comes from [crypto/rand] this method
+ * is not recommended to be used on its own in critical secure contexts because
+ * the generated length could vary too much on the used pattern and may not be
+ * as secure as simply calling [security.RandomString].
+ * If you still insist on using it for such purposes, consider at least
+ * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`.
+ *
+ * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages.
+ */
+ (pattern: string, ...optFlags: syntax.Flags[]): string
+ }
+}
+
+/**
+ * Package template is a thin wrapper around the standard html/template
+ * and text/template packages that implements a convenient registry to
+ * load and cache templates on the fly concurrently.
+ *
+ * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
+ *
+ * Example:
+ *
+ * ```
+ * registry := template.NewRegistry()
+ *
+ * html1, err := registry.LoadFiles(
+ * // the files set will be parsed only once and then cached
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "John"})
+ *
+ * html2, err := registry.LoadFiles(
+ * // reuse the already parsed and cached files set
+ * "layout.html",
+ * "content.html",
+ * ).Render(map[string]any{"name": "Jane"})
+ * ```
+ */
+namespace template {
+ interface newRegistry {
+ /**
+ * NewRegistry creates and initializes a new templates registry with
+ * some defaults (e.g. global "raw" template function for unescaped HTML).
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
+ */
+ (): (Registry)
+ }
+ /**
+ * Registry defines a templates registry that is safe to be used by multiple goroutines.
+ *
+ * Use the Registry.Load* methods to load templates into the registry.
+ */
+ interface Registry {
+ }
+ interface Registry {
+ /**
+ * AddFuncs registers new global template functions.
+ *
+ * The key of each map entry is the function name that will be used in the templates.
+ * If a function with the map entry name already exists it will be replaced with the new one.
+ *
+ * The value of each map entry is a function that must have either a
+ * single return value, or two return values of which the second has type error.
+ *
+ * Example:
+ *
+ * ```
+ * r.AddFuncs(map[string]any{
+ * "toUpper": func(str string) string {
+ * return strings.ToUpper(str)
+ * },
+ * ...
+ * })
+ * ```
+ */
+ addFuncs(funcs: _TygojaDict): (Registry)
+ }
+ interface Registry {
+ /**
+ * LoadFiles caches (if not already) the specified filenames set as a
+ * single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 filename specified.
+ */
+ loadFiles(...filenames: string[]): (Renderer)
+ }
+ interface Registry {
+ /**
+ * LoadString caches (if not already) the specified inline string as a
+ * single template and returns a ready to use Renderer instance.
+ */
+ loadString(text: string): (Renderer)
+ }
+ interface Registry {
+ /**
+ * LoadFS caches (if not already) the specified fs and globPatterns
+ * pair as a single template and returns a ready to use Renderer instance.
+ *
+ * There must be at least 1 file matching the provided globPattern(s)
+ * (note that most file names serve as glob patterns matching themselves).
+ */
+ loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer)
+ }
+ /**
+ * Renderer defines a single parsed template.
+ */
+ interface Renderer {
+ }
+ interface Renderer {
+ /**
+ * Render executes the template with the specified data as the dot object
+ * and returns the result as plain string.
+ */
+ render(data: any): string
+ }
+}
+
+/**
+ * Package validation provides configurable and extensible rules for validating data of various types.
+ */
+namespace ozzo_validation {
+ /**
+ * Error interface represents a validation error
+ */
+ interface Error {
+ [key:string]: any;
+ error(): string
+ code(): string
+ message(): string
+ setMessage(_arg0: string): Error
+ params(): _TygojaDict
+ setParams(_arg0: _TygojaDict): Error
+ }
+}
+
+/**
+ * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases.
+ */
+namespace dbx {
+ /**
+ * Builder supports building SQL statements in a DB-agnostic way.
+ * Builder mainly provides two sets of query building methods: those building SELECT statements
+ * and those manipulating DB data or schema (e.g. INSERT statements, CREATE TABLE statements).
+ */
+ interface Builder {
+ [key:string]: any;
+ /**
+ * NewQuery creates a new Query object with the given SQL statement.
+ * The SQL statement may contain parameter placeholders which can be bound with actual parameter
+ * values before the statement is executed.
+ */
+ newQuery(_arg0: string): (Query)
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list of column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(..._arg0: string[]): (SelectQuery)
+ /**
+ * ModelQuery returns a new ModelQuery object that can be used to perform model insertion, update, and deletion.
+ * The parameter to this method should be a pointer to the model struct that needs to be inserted, updated, or deleted.
+ */
+ model(_arg0: {
+ }): (ModelQuery)
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(_arg0: number): string
+ /**
+ * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ */
+ quote(_arg0: string): string
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(_arg0: string): string
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(_arg0: string): string
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ /**
+ * Insert creates a Query that represents an INSERT SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ insert(table: string, cols: Params): (Query)
+ /**
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ /**
+ * Update creates a Query that represents an UPDATE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding new column
+ * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will update ALL rows in the table).
+ */
+ update(table: string, cols: Params, where: Expression): (Query)
+ /**
+ * Delete creates a Query that represents a DELETE SQL statement.
+ * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will delete ALL rows in the table).
+ */
+ delete(table: string, where: Expression): (Query)
+ /**
+ * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column types.
+ * The optional "options" parameters will be appended to the generated SQL statement.
+ */
+ createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ /**
+ * DropTable creates a Query that can be used to drop a table.
+ */
+ dropTable(table: string): (Query)
+ /**
+ * TruncateTable creates a Query that can be used to truncate a table.
+ */
+ truncateTable(table: string): (Query)
+ /**
+ * AddColumn creates a Query that can be used to add a column to a table.
+ */
+ addColumn(table: string, col: string, typ: string): (Query)
+ /**
+ * DropColumn creates a Query that can be used to drop a column from a table.
+ */
+ dropColumn(table: string, col: string): (Query)
+ /**
+ * RenameColumn creates a Query that can be used to rename a column in a table.
+ */
+ renameColumn(table: string, oldName: string, newName: string): (Query)
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ /**
+ * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
+ * The "name" parameter specifies the name of the primary key constraint.
+ */
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
+ /**
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ */
+ dropPrimaryKey(table: string, name: string): (Query)
+ /**
+ * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
+ * The length of cols and refCols must be the same as they refer to the primary and referential columns.
+ * The optional "options" parameters will be appended to the SQL statement. They can be used to
+ * specify options such as "ON DELETE CASCADE".
+ */
+ addForeignKey(table: string, name: string, cols: Array<string>, refCols: Array<string>, refTable: string, ...options: string[]): (Query)
+ /**
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ */
+ dropForeignKey(table: string, name: string): (Query)
+ /**
+ * CreateIndex creates a Query that can be used to create an index for a table.
+ */
+ createIndex(table: string, name: string, ...cols: string[]): (Query)
+ /**
+ * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
+ */
+ createUniqueIndex(table: string, name: string, ...cols: string[]): (Query)
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ /**
+ * BaseBuilder provides a basic implementation of the Builder interface.
+ */
+ interface BaseBuilder {
+ }
+ interface newBaseBuilder {
+ /**
+ * NewBaseBuilder creates a new BaseBuilder instance.
+ */
+ (db: DB, executor: Executor): (BaseBuilder)
+ }
+ interface BaseBuilder {
+ /**
+ * DB returns the DB instance that this builder is associated with.
+ */
+ db(): (DB)
+ }
+ interface BaseBuilder {
+ /**
+ * Executor returns the executor object (a DB instance or a transaction) for executing SQL statements.
+ */
+ executor(): Executor
+ }
+ interface BaseBuilder {
+ /**
+ * NewQuery creates a new Query object with the given SQL statement.
+ * The SQL statement may contain parameter placeholders which can be bound with actual parameter
+ * values before the statement is executed.
+ */
+ newQuery(sql: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(_arg0: number): string
+ }
+ interface BaseBuilder {
+ /**
+ * Quote quotes a string so that it can be embedded in a SQL statement as a string value.
+ */
+ quote(s: string): string
+ }
+ interface BaseBuilder {
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(s: string): string
+ }
+ interface BaseBuilder {
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(s: string): string
+ }
+ interface BaseBuilder {
+ /**
+ * Insert creates a Query that represents an INSERT SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ insert(table: string, cols: Params): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * Update creates a Query that represents an UPDATE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding new column
+ * values. If the "where" expression is nil, the UPDATE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will update ALL rows in the table).
+ */
+ update(table: string, cols: Params, where: Expression): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * Delete creates a Query that represents a DELETE SQL statement.
+ * If the "where" expression is nil, the DELETE SQL statement will have no WHERE clause
+ * (be careful in this case as the SQL statement will delete ALL rows in the table).
+ */
+ delete(table: string, where: Expression): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * CreateTable creates a Query that represents a CREATE TABLE SQL statement.
+ * The keys of cols are the column names, while the values of cols are the corresponding column types.
+ * The optional "options" parameters will be appended to the generated SQL statement.
+ */
+ createTable(table: string, cols: _TygojaDict, ...options: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropTable creates a Query that can be used to drop a table.
+ */
+ dropTable(table: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * TruncateTable creates a Query that can be used to truncate a table.
+ */
+ truncateTable(table: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * AddColumn creates a Query that can be used to add a column to a table.
+ */
+ addColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropColumn creates a Query that can be used to drop a column from a table.
+ */
+ dropColumn(table: string, col: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * RenameColumn creates a Query that can be used to rename a column in a table.
+ */
+ renameColumn(table: string, oldName: string, newName: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
+ * The "name" parameter specifies the name of the primary key constraint.
+ */
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ */
+ dropPrimaryKey(table: string, name: string): (Query)
+ }
+ interface BaseBuilder {
+  /**
+   * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
+   * The length of cols and refCols must be the same as they refer to the primary and referential columns.
+   * The optional "options" parameters will be appended to the SQL statement. They can be used to
+   * specify options such as "ON DELETE CASCADE".
+   */
+  addForeignKey(table: string, name: string, cols: Array<string>, refCols: Array<string>, refTable: string, ...options: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ */
+ dropForeignKey(table: string, name: string): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * CreateIndex creates a Query that can be used to create an index for a table.
+ */
+ createIndex(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * CreateUniqueIndex creates a Query that can be used to create a unique index for a table.
+ */
+ createUniqueIndex(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface BaseBuilder {
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ /**
+ * MssqlBuilder is the builder for SQL Server databases.
+ */
+ type _sOgBiBI = BaseBuilder
+ interface MssqlBuilder extends _sOgBiBI {
+ }
+ /**
+ * MssqlQueryBuilder is the query builder for SQL Server databases.
+ */
+ type _sRHlmCE = BaseQueryBuilder
+ interface MssqlQueryBuilder extends _sRHlmCE {
+ }
+ interface newMssqlBuilder {
+ /**
+ * NewMssqlBuilder creates a new MssqlBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface MssqlBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface MssqlBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface MssqlBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ interface MssqlBuilder {
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(s: string): string
+ }
+ interface MssqlBuilder {
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(s: string): string
+ }
+ interface MssqlBuilder {
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface MssqlBuilder {
+ /**
+ * RenameColumn creates a Query that can be used to rename a column in a table.
+ */
+ renameColumn(table: string, oldName: string, newName: string): (Query)
+ }
+ interface MssqlBuilder {
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface MssqlQueryBuilder {
+  /**
+   * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+   */
+  buildOrderByAndLimit(sql: string, cols: Array<string>, limit: number, offset: number): string
+ }
+ /**
+ * MysqlBuilder is the builder for MySQL databases.
+ */
+ type _syGNUZb = BaseBuilder
+ interface MysqlBuilder extends _syGNUZb {
+ }
+ interface newMysqlBuilder {
+ /**
+ * NewMysqlBuilder creates a new MysqlBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface MysqlBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface MysqlBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface MysqlBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ interface MysqlBuilder {
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(s: string): string
+ }
+ interface MysqlBuilder {
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(s: string): string
+ }
+ interface MysqlBuilder {
+ /**
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ }
+ interface MysqlBuilder {
+ /**
+ * RenameColumn creates a Query that can be used to rename a column in a table.
+ */
+ renameColumn(table: string, oldName: string, newName: string): (Query)
+ }
+ interface MysqlBuilder {
+ /**
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ */
+ dropPrimaryKey(table: string, name: string): (Query)
+ }
+ interface MysqlBuilder {
+ /**
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ */
+ dropForeignKey(table: string, name: string): (Query)
+ }
+ /**
+ * OciBuilder is the builder for Oracle databases.
+ */
+ type _ssjJHcd = BaseBuilder
+ interface OciBuilder extends _ssjJHcd {
+ }
+ /**
+ * OciQueryBuilder is the query builder for Oracle databases.
+ */
+ type _spYCyyo = BaseQueryBuilder
+ interface OciQueryBuilder extends _spYCyyo {
+ }
+ interface newOciBuilder {
+ /**
+ * NewOciBuilder creates a new OciBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface OciBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface OciBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ interface OciBuilder {
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(i: number): string
+ }
+ interface OciBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface OciBuilder {
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ interface OciBuilder {
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface OciBuilder {
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface OciQueryBuilder {
+  /**
+   * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+   */
+  buildOrderByAndLimit(sql: string, cols: Array<string>, limit: number, offset: number): string
+ }
+ /**
+ * PgsqlBuilder is the builder for PostgreSQL databases.
+ */
+ type _sECoPYn = BaseBuilder
+ interface PgsqlBuilder extends _sECoPYn {
+ }
+ interface newPgsqlBuilder {
+ /**
+ * NewPgsqlBuilder creates a new PgsqlBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface PgsqlBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface PgsqlBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ interface PgsqlBuilder {
+ /**
+ * GeneratePlaceholder generates an anonymous parameter placeholder with the given parameter ID.
+ */
+ generatePlaceholder(i: number): string
+ }
+ interface PgsqlBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface PgsqlBuilder {
+ /**
+ * Upsert creates a Query that represents an UPSERT SQL statement.
+ * Upsert inserts a row into the table if the primary key or unique index is not found.
+ * Otherwise it will update the row with the new values.
+ * The keys of cols are the column names, while the values of cols are the corresponding column
+ * values to be inserted.
+ */
+ upsert(table: string, cols: Params, ...constraints: string[]): (Query)
+ }
+ interface PgsqlBuilder {
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ interface PgsqlBuilder {
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface PgsqlBuilder {
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ /**
+ * SqliteBuilder is the builder for SQLite databases.
+ */
+ type _smFOrtL = BaseBuilder
+ interface SqliteBuilder extends _smFOrtL {
+ }
+ interface newSqliteBuilder {
+ /**
+ * NewSqliteBuilder creates a new SqliteBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface SqliteBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface SqliteBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface SqliteBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ interface SqliteBuilder {
+ /**
+ * QuoteSimpleTableName quotes a simple table name.
+ * A simple table name does not contain any schema prefix.
+ */
+ quoteSimpleTableName(s: string): string
+ }
+ interface SqliteBuilder {
+ /**
+ * QuoteSimpleColumnName quotes a simple column name.
+ * A simple column name does not contain any table prefix.
+ */
+ quoteSimpleColumnName(s: string): string
+ }
+ interface SqliteBuilder {
+ /**
+ * DropIndex creates a Query that can be used to remove the named index from a table.
+ */
+ dropIndex(table: string, name: string): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * TruncateTable creates a Query that can be used to truncate a table.
+ */
+ truncateTable(table: string): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * RenameTable creates a Query that can be used to rename a table.
+ */
+ renameTable(oldName: string, newName: string): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * AlterColumn creates a Query that can be used to change the definition of a table column.
+ */
+ alterColumn(table: string, col: string, typ: string): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * AddPrimaryKey creates a Query that can be used to specify primary key(s) for a table.
+ * The "name" parameter specifies the name of the primary key constraint.
+ */
+ addPrimaryKey(table: string, name: string, ...cols: string[]): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * DropPrimaryKey creates a Query that can be used to remove the named primary key constraint from a table.
+ */
+ dropPrimaryKey(table: string, name: string): (Query)
+ }
+ interface SqliteBuilder {
+  /**
+   * AddForeignKey creates a Query that can be used to add a foreign key constraint to a table.
+   * The length of cols and refCols must be the same as they refer to the primary and referential columns.
+   * The optional "options" parameters will be appended to the SQL statement. They can be used to
+   * specify options such as "ON DELETE CASCADE".
+   */
+  addForeignKey(table: string, name: string, cols: Array<string>, refCols: Array<string>, refTable: string, ...options: string[]): (Query)
+ }
+ interface SqliteBuilder {
+ /**
+ * DropForeignKey creates a Query that can be used to remove the named foreign key constraint from a table.
+ */
+ dropForeignKey(table: string, name: string): (Query)
+ }
+ /**
+ * StandardBuilder is the builder that is used by DB for an unknown driver.
+ */
+ type _sJbPsvb = BaseBuilder
+ interface StandardBuilder extends _sJbPsvb {
+ }
+ interface newStandardBuilder {
+ /**
+ * NewStandardBuilder creates a new StandardBuilder instance.
+ */
+ (db: DB, executor: Executor): Builder
+ }
+ interface StandardBuilder {
+ /**
+ * QueryBuilder returns the query builder supporting the current DB.
+ */
+ queryBuilder(): QueryBuilder
+ }
+ interface StandardBuilder {
+ /**
+ * Select returns a new SelectQuery object that can be used to build a SELECT statement.
+ * The parameters to this method should be the list column names to be selected.
+ * A column name may have an optional alias name. For example, Select("id", "my_name AS name").
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface StandardBuilder {
+ /**
+ * Model returns a new ModelQuery object that can be used to perform model-based DB operations.
+ * The model passed to this method should be a pointer to a model struct.
+ */
+ model(model: {
+ }): (ModelQuery)
+ }
+ /**
+ * LogFunc logs a message for each SQL statement being executed.
+ * This method takes one or multiple parameters. If a single parameter
+ * is provided, it will be treated as the log message. If multiple parameters
+ * are provided, they will be passed to fmt.Sprintf() to generate the log message.
+ */
+ interface LogFunc {(format: string, ...a: {
+ }[]): void }
+ /**
+ * PerfFunc is called when a query finishes execution.
+ * The query execution time is passed to this function so that the DB performance
+ * can be profiled. The "ns" parameter gives the number of nanoseconds that the
+ * SQL statement takes to execute, while the "execute" parameter indicates whether
+ * the SQL statement is executed or queried (usually SELECT statements).
+ */
+ interface PerfFunc {(ns: number, sql: string, execute: boolean): void }
+ /**
+ * QueryLogFunc is called each time when performing a SQL query.
+ * The "t" parameter gives the time that the SQL statement takes to execute,
+ * while rows and err are the result of the query.
+ */
+ interface QueryLogFunc {(ctx: context.Context, t: time.Duration, sql: string, rows: sql.Rows, err: Error): void }
+ /**
+ * ExecLogFunc is called each time when a SQL statement is executed.
+ * The "t" parameter gives the time that the SQL statement takes to execute,
+ * while result and err refer to the result of the execution.
+ */
+ interface ExecLogFunc {(ctx: context.Context, t: time.Duration, sql: string, result: sql.Result, err: Error): void }
+ /**
+ * BuilderFunc creates a Builder instance using the given DB instance and Executor.
+ */
+ interface BuilderFunc {(_arg0: DB, _arg1: Executor): Builder }
+ /**
+ * DB enhances sql.DB by providing a set of DB-agnostic query building methods.
+ * DB allows easier query building and population of data into Go variables.
+ */
+ type _shaNpOq = Builder
+ interface DB extends _shaNpOq {
+ /**
+ * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc.
+ */
+ fieldMapper: FieldMapFunc
+ /**
+ * TableMapper maps structs to table names. Defaults to GetTableName.
+ */
+ tableMapper: TableMapFunc
+ /**
+ * LogFunc logs the SQL statements being executed. Defaults to nil, meaning no logging.
+ */
+ logFunc: LogFunc
+ /**
+ * PerfFunc logs the SQL execution time. Defaults to nil, meaning no performance profiling.
+ * Deprecated: Please use QueryLogFunc and ExecLogFunc instead.
+ */
+ perfFunc: PerfFunc
+ /**
+ * QueryLogFunc is called each time when performing a SQL query that returns data.
+ */
+ queryLogFunc: QueryLogFunc
+ /**
+ * ExecLogFunc is called each time when a SQL statement is executed.
+ */
+ execLogFunc: ExecLogFunc
+ }
+ /**
+  * Errors represents a list of errors.
+  */
+ interface Errors extends Array<Error>{}
+ interface newFromDB {
+ /**
+ * NewFromDB encapsulates an existing database connection.
+ */
+ (sqlDB: sql.DB, driverName: string): (DB)
+ }
+ interface open {
+ /**
+ * Open opens a database specified by a driver name and data source name (DSN).
+ * Note that Open does not check if DSN is specified correctly. It doesn't try to establish a DB connection either.
+ * Please refer to sql.Open() for more information.
+ */
+ (driverName: string, dsn: string): (DB)
+ }
+ interface mustOpen {
+ /**
+ * MustOpen opens a database and establishes a connection to it.
+ * Please refer to sql.Open() and sql.Ping() for more information.
+ */
+ (driverName: string, dsn: string): (DB)
+ }
+ interface DB {
+ /**
+ * Clone makes a shallow copy of DB.
+ */
+ clone(): (DB)
+ }
+ interface DB {
+ /**
+ * WithContext returns a new instance of DB associated with the given context.
+ */
+ withContext(ctx: context.Context): (DB)
+ }
+ interface DB {
+ /**
+ * Context returns the context associated with the DB instance.
+ * It returns nil if no context is associated.
+ */
+ context(): context.Context
+ }
+ interface DB {
+ /**
+ * DB returns the sql.DB instance encapsulated by dbx.DB.
+ */
+ db(): (sql.DB)
+ }
+ interface DB {
+ /**
+ * Close closes the database, releasing any open resources.
+ * It is rare to Close a DB, as the DB handle is meant to be
+ * long-lived and shared between many goroutines.
+ */
+ close(): void
+ }
+ interface DB {
+ /**
+ * Begin starts a transaction.
+ */
+ begin(): (Tx)
+ }
+ interface DB {
+ /**
+ * BeginTx starts a transaction with the given context and transaction options.
+ */
+ beginTx(ctx: context.Context, opts: sql.TxOptions): (Tx)
+ }
+ interface DB {
+ /**
+ * Wrap encapsulates an existing transaction.
+ */
+ wrap(sqlTx: sql.Tx): (Tx)
+ }
+ interface DB {
+ /**
+ * Transactional starts a transaction and executes the given function.
+ * If the function returns an error, the transaction will be rolled back.
+ * Otherwise, the transaction will be committed.
+ */
+ transactional(f: (_arg0: Tx) => void): void
+ }
+ interface DB {
+ /**
+ * TransactionalContext starts a transaction and executes the given function with the given context and transaction options.
+ * If the function returns an error, the transaction will be rolled back.
+ * Otherwise, the transaction will be committed.
+ */
+ transactionalContext(ctx: context.Context, opts: sql.TxOptions, f: (_arg0: Tx) => void): void
+ }
+ interface DB {
+ /**
+ * DriverName returns the name of the DB driver.
+ */
+ driverName(): string
+ }
+ interface DB {
+ /**
+ * QuoteTableName quotes the given table name appropriately.
+ * If the table name contains DB schema prefix, it will be handled accordingly.
+ * This method will do nothing if the table name is already quoted or if it contains parenthesis.
+ */
+ quoteTableName(s: string): string
+ }
+ interface DB {
+ /**
+ * QuoteColumnName quotes the given column name appropriately.
+ * If the table name contains table name prefix, it will be handled accordingly.
+ * This method will do nothing if the column name is already quoted or if it contains parenthesis.
+ */
+ quoteColumnName(s: string): string
+ }
+ interface Errors {
+ /**
+ * Error returns the error string of Errors.
+ */
+ error(): string
+ }
+ /**
+ * Expression represents a DB expression that can be embedded in a SQL statement.
+ */
+ interface Expression {
+ [key:string]: any;
+ /**
+ * Build converts an expression into a SQL fragment.
+ * If the expression contains binding parameters, they will be added to the given Params.
+ */
+ build(_arg0: DB, _arg1: Params): string
+ }
+ /**
+ * HashExp represents a hash expression.
+ *
+ * A hash expression is a map whose keys are DB column names which need to be filtered according
+ * to the corresponding values. For example, HashExp{"level": 2, "dept": 10} will generate
+ * the SQL: "level"=2 AND "dept"=10.
+ *
+ * HashExp also handles nil values and slice values. For example, HashExp{"level": []interface{}{1, 2}, "dept": nil}
+ * will generate: "level" IN (1, 2) AND "dept" IS NULL.
+ */
+ interface HashExp extends _TygojaDict{}
+ interface newExp {
+ /**
+ * NewExp generates an expression with the specified SQL fragment and the optional binding parameters.
+ */
+ (e: string, ...params: Params[]): Expression
+ }
+ interface not {
+ /**
+ * Not generates a NOT expression which prefixes "NOT" to the specified expression.
+ */
+ (e: Expression): Expression
+ }
+ interface and {
+ /**
+ * And generates an AND expression which concatenates the given expressions with "AND".
+ */
+ (...exps: Expression[]): Expression
+ }
+ interface or {
+ /**
+ * Or generates an OR expression which concatenates the given expressions with "OR".
+ */
+ (...exps: Expression[]): Expression
+ }
+ interface _in {
+ /**
+ * In generates an IN expression for the specified column and the list of allowed values.
+ * If values is empty, a SQL "0=1" will be generated which represents a false expression.
+ */
+ (col: string, ...values: {
+ }[]): Expression
+ }
+ interface notIn {
+ /**
+ * NotIn generates an NOT IN expression for the specified column and the list of disallowed values.
+ * If values is empty, an empty string will be returned indicating a true expression.
+ */
+ (col: string, ...values: {
+ }[]): Expression
+ }
+ interface like {
+ /**
+ * Like generates a LIKE expression for the specified column and the possible strings that the column should be like.
+ * If multiple values are present, the column should be like *all* of them. For example, Like("name", "key", "word")
+ * will generate a SQL expression: "name" LIKE "%key%" AND "name" LIKE "%word%".
+ *
+ * By default, each value will be surrounded by "%" to enable partial matching. If a value contains special characters
+ * such as "%", "\", "_", they will also be properly escaped.
+ *
+ * You may call Escape() and/or Match() to change the default behavior. For example, Like("name", "key").Match(false, true)
+ * generates "name" LIKE "key%".
+ */
+ (col: string, ...values: string[]): (LikeExp)
+ }
+ interface notLike {
+ /**
+ * NotLike generates a NOT LIKE expression.
+ * For example, NotLike("name", "key", "word") will generate a SQL expression:
+ * "name" NOT LIKE "%key%" AND "name" NOT LIKE "%word%". Please see Like() for more details.
+ */
+ (col: string, ...values: string[]): (LikeExp)
+ }
+ interface orLike {
+ /**
+ * OrLike generates an OR LIKE expression.
+ * This is similar to Like() except that the column should be like one of the possible values.
+ * For example, OrLike("name", "key", "word") will generate a SQL expression:
+ * "name" LIKE "%key%" OR "name" LIKE "%word%". Please see Like() for more details.
+ */
+ (col: string, ...values: string[]): (LikeExp)
+ }
+ interface orNotLike {
+ /**
+ * OrNotLike generates an OR NOT LIKE expression.
+ * For example, OrNotLike("name", "key", "word") will generate a SQL expression:
+ * "name" NOT LIKE "%key%" OR "name" NOT LIKE "%word%". Please see Like() for more details.
+ */
+ (col: string, ...values: string[]): (LikeExp)
+ }
+ interface exists {
+ /**
+ * Exists generates an EXISTS expression by prefixing "EXISTS" to the given expression.
+ */
+ (exp: Expression): Expression
+ }
+ interface notExists {
+ /**
+ * NotExists generates an EXISTS expression by prefixing "NOT EXISTS" to the given expression.
+ */
+ (exp: Expression): Expression
+ }
+ interface between {
+ /**
+ * Between generates a BETWEEN expression.
+ * For example, Between("age", 10, 30) generates: "age" BETWEEN 10 AND 30
+ */
+ (col: string, from: {
+ }, to: {
+ }): Expression
+ }
+ interface notBetween {
+ /**
+ * NotBetween generates a NOT BETWEEN expression.
+ * For example, NotBetween("age", 10, 30) generates: "age" NOT BETWEEN 10 AND 30
+ */
+ (col: string, from: {
+ }, to: {
+ }): Expression
+ }
+ /**
+ * Exp represents an expression with a SQL fragment and a list of optional binding parameters.
+ */
+ interface Exp {
+ }
+ interface Exp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ interface HashExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * NotExp represents an expression that should prefix "NOT" to a specified expression.
+ */
+ interface NotExp {
+ }
+ interface NotExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * AndOrExp represents an expression that concatenates multiple expressions using either "AND" or "OR".
+ */
+ interface AndOrExp {
+ }
+ interface AndOrExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * InExp represents an "IN" or "NOT IN" expression.
+ */
+ interface InExp {
+ }
+ interface InExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * LikeExp represents a variant of LIKE expressions.
+ */
+ interface LikeExp {
+ /**
+ * Like stores the LIKE operator. It can be "LIKE", "NOT LIKE".
+ * It may also be customized as something like "ILIKE".
+ */
+ like: string
+ }
+ interface LikeExp {
+ /**
+ * Escape specifies how a LIKE expression should be escaped.
+ * Each string at position 2i represents a special character and the string at position 2i+1 is
+ * the corresponding escaped version.
+ */
+ escape(...chars: string[]): (LikeExp)
+ }
+ interface LikeExp {
+ /**
+ * Match specifies whether to do wildcard matching on the left and/or right of given strings.
+ */
+ match(left: boolean, right: boolean): (LikeExp)
+ }
+ interface LikeExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * ExistsExp represents an EXISTS or NOT EXISTS expression.
+ */
+ interface ExistsExp {
+ }
+ interface ExistsExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * BetweenExp represents a BETWEEN or a NOT BETWEEN expression.
+ */
+ interface BetweenExp {
+ }
+ interface BetweenExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ interface enclose {
+ /**
+ * Enclose surrounds the provided nonempty expression with parenthesis "()".
+ */
+ (exp: Expression): Expression
+ }
+ /**
+ * EncloseExp represents a parenthesis enclosed expression.
+ */
+ interface EncloseExp {
+ }
+ interface EncloseExp {
+ /**
+ * Build converts an expression into a SQL fragment.
+ */
+ build(db: DB, params: Params): string
+ }
+ /**
+ * TableModel is the interface that should be implemented by models which have unconventional table names.
+ */
+ interface TableModel {
+ [key:string]: any;
+ tableName(): string
+ }
+ /**
+ * ModelQuery represents a query associated with a struct model.
+ */
+ interface ModelQuery {
+ }
+ interface newModelQuery {
+ (model: {
+ }, fieldMapFunc: FieldMapFunc, db: DB, builder: Builder): (ModelQuery)
+ }
+ interface ModelQuery {
+ /**
+ * Context returns the context associated with the query.
+ */
+ context(): context.Context
+ }
+ interface ModelQuery {
+ /**
+ * WithContext associates a context with the query.
+ */
+ withContext(ctx: context.Context): (ModelQuery)
+ }
+ interface ModelQuery {
+ /**
+ * Exclude excludes the specified struct fields from being inserted/updated into the DB table.
+ */
+ exclude(...attrs: string[]): (ModelQuery)
+ }
+ interface ModelQuery {
+ /**
+ * Insert inserts a row in the table using the struct model associated with this query.
+ *
+ * By default, it inserts *all* public fields into the table, including those nil or empty ones.
+ * You may pass a list of the fields to this method to indicate that only those fields should be inserted.
+ * You may also call Exclude to exclude some fields from being inserted.
+ *
+ * If a model has an empty primary key, it is considered auto-incremental and the corresponding struct
+ * field will be filled with the generated primary key value after a successful insertion.
+ */
+ insert(...attrs: string[]): void
+ }
+ interface ModelQuery {
+ /**
+ * Update updates a row in the table using the struct model associated with this query.
+ * The row being updated has the same primary key as specified by the model.
+ *
+ * By default, it updates *all* public fields in the table, including those nil or empty ones.
+ * You may pass a list of the fields to this method to indicate that only those fields should be updated.
+ * You may also call Exclude to exclude some fields from being updated.
+ */
+ update(...attrs: string[]): void
+ }
+ interface ModelQuery {
+ /**
+ * Delete deletes a row in the table using the primary key specified by the struct model associated with this query.
+ */
+ delete(): void
+ }
+ /**
+ * ExecHookFunc executes before op allowing custom handling like auto fail/retry.
+ */
+ interface ExecHookFunc {(q: Query, op: () => void): void }
+ /**
+ * OneHookFunc executes right before the query populate the row result from One() call (aka. op).
+ */
+ interface OneHookFunc {(q: Query, a: {
+ }, op: (b: {
+ }) => void): void }
+ /**
+ * AllHookFunc executes right before the query populate the row result from All() call (aka. op).
+ */
+ interface AllHookFunc {(q: Query, sliceA: {
+ }, op: (sliceB: {
+ }) => void): void }
+ /**
+ * Params represents a list of parameter values to be bound to a SQL statement.
+ * The map keys are the parameter names while the map values are the corresponding parameter values.
+ */
+ interface Params extends _TygojaDict{}
+ /**
+ * Executor prepares, executes, or queries a SQL statement.
+ */
+ interface Executor {
+ [key:string]: any;
+ /**
+ * Exec executes a SQL statement
+ */
+ exec(query: string, ...args: {
+ }[]): sql.Result
+ /**
+ * ExecContext executes a SQL statement with the given context
+ */
+ execContext(ctx: context.Context, query: string, ...args: {
+ }[]): sql.Result
+ /**
+ * Query queries a SQL statement
+ */
+ query(query: string, ...args: {
+ }[]): (sql.Rows)
+ /**
+ * QueryContext queries a SQL statement with the given context
+ */
+ queryContext(ctx: context.Context, query: string, ...args: {
+ }[]): (sql.Rows)
+ /**
+ * Prepare creates a prepared statement
+ */
+ prepare(query: string): (sql.Stmt)
+ }
+ /**
+ * Query represents a SQL statement to be executed.
+ */
+ interface Query {
+ /**
+ * FieldMapper maps struct field names to DB column names.
+ */
+ fieldMapper: FieldMapFunc
+ /**
+ * LastError contains the last error (if any) of the query.
+ * LastError is cleared by Execute(), Row(), Rows(), One(), and All().
+ */
+ lastError: Error
+ /**
+ * LogFunc is used to log the SQL statement being executed.
+ */
+ logFunc: LogFunc
+ /**
+ * PerfFunc is used to log the SQL execution time. It is ignored if nil.
+ * Deprecated: Please use QueryLogFunc and ExecLogFunc instead.
+ */
+ perfFunc: PerfFunc
+ /**
+ * QueryLogFunc is called each time when performing a SQL query that returns data.
+ */
+ queryLogFunc: QueryLogFunc
+ /**
+ * ExecLogFunc is called each time when a SQL statement is executed.
+ */
+ execLogFunc: ExecLogFunc
+ }
+ interface newQuery {
+ /**
+ * NewQuery creates a new Query with the given SQL statement.
+ */
+ (db: DB, executor: Executor, sql: string): (Query)
+ }
+ interface Query {
+ /**
+ * SQL returns the original SQL used to create the query.
+ * The actual SQL (RawSQL) being executed is obtained by replacing the named
+ * parameter placeholders with anonymous ones.
+ */
+ sql(): string
+ }
+ interface Query {
+ /**
+ * Context returns the context associated with the query.
+ */
+ context(): context.Context
+ }
+ interface Query {
+ /**
+ * WithContext associates a context with the query.
+ */
+ withContext(ctx: context.Context): (Query)
+ }
+ interface Query {
+ /**
+ * WithExecHook associates the provided exec hook function with the query.
+ *
+ * It is called for every Query resolver (Execute(), One(), All(), Row(), Column()),
+ * allowing you to implement auto fail/retry or any other additional handling.
+ */
+ withExecHook(fn: ExecHookFunc): (Query)
+ }
+ interface Query {
+ /**
+ * WithOneHook associates the provided hook function with the query,
+ * called on q.One(), allowing you to implement custom struct scan based
+ * on the One() argument and/or result.
+ */
+ withOneHook(fn: OneHookFunc): (Query)
+ }
+ interface Query {
+ /**
+ * WithAllHook associates the provided hook function with the query,
+ * called on q.All(), allowing you to implement custom slice scan based
+ * on the All() argument and/or result.
+ */
+ withAllHook(fn: AllHookFunc): (Query)
+ }
+ interface Query {
+ /**
+ * Params returns the parameters to be bound to the SQL statement represented by this query.
+ */
+ params(): Params
+ }
+ interface Query {
+ /**
+ * Prepare creates a prepared statement for later queries or executions.
+ * Close() should be called after finishing all queries.
+ */
+ prepare(): (Query)
+ }
+ interface Query {
+ /**
+ * Close closes the underlying prepared statement.
+ * Close does nothing if the query has not been prepared before.
+ */
+ close(): void
+ }
+ interface Query {
+ /**
+ * Bind sets the parameters that should be bound to the SQL statement.
+ * The parameter placeholders in the SQL statement are in the format of "{:ParamName}".
+ */
+ bind(params: Params): (Query)
+ }
+ interface Query {
+ /**
+ * Execute executes the SQL statement without retrieving data.
+ */
+ execute(): sql.Result
+ }
+ interface Query {
+ /**
+ * One executes the SQL statement and populates the first row of the result into a struct or NullStringMap.
+ * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how to specify
+ * the variable to be populated.
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
+ */
+ one(a: {
+ }): void
+ }
+ interface Query {
+ /**
+ * All executes the SQL statement and populates all the resulting rows into a slice of struct or NullStringMap.
+ * The slice must be given as a pointer. Each slice element must be either a struct or a NullStringMap.
+ * Refer to Rows.ScanStruct() and Rows.ScanMap() for more details on how each slice element can be.
+ * If the query returns no row, the slice will be an empty slice (not nil).
+ */
+ all(slice: {
+ }): void
+ }
+ interface Query {
+ /**
+ * Row executes the SQL statement and populates the first row of the result into a list of variables.
+ * Note that the number of the variables should match to that of the columns in the query result.
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
+ */
+ row(...a: {
+ }[]): void
+ }
+ interface Query {
+ /**
+ * Column executes the SQL statement and populates the first column of the result into a slice.
+ * Note that the parameter must be a pointer to a slice.
+ */
+ column(a: {
+ }): void
+ }
+ interface Query {
+ /**
+ * Rows executes the SQL statement and returns a Rows object to allow retrieving data row by row.
+ */
+ rows(): (Rows)
+ }
+ /**
+ * QueryBuilder builds different clauses for a SELECT SQL statement.
+ */
+ interface QueryBuilder {
+ [key:string]: any;
+ /**
+ * BuildSelect generates a SELECT clause from the given selected column names.
+ */
+ buildSelect(cols: Array, distinct: boolean, option: string): string
+ /**
+ * BuildFrom generates a FROM clause from the given tables.
+ */
+ buildFrom(tables: Array): string
+ /**
+ * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
+ */
+ buildGroupBy(cols: Array): string
+ /**
+ * BuildJoin generates a JOIN clause from the given join information.
+ */
+ buildJoin(_arg0: Array, _arg1: Params): string
+ /**
+ * BuildWhere generates a WHERE clause from the given expression.
+ */
+ buildWhere(_arg0: Expression, _arg1: Params): string
+ /**
+ * BuildHaving generates a HAVING clause from the given expression.
+ */
+ buildHaving(_arg0: Expression, _arg1: Params): string
+ /**
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ */
+ buildOrderByAndLimit(_arg0: string, _arg1: Array, _arg2: number, _arg3: number): string
+ /**
+ * BuildUnion generates a UNION clause from the given union information.
+ */
+ buildUnion(_arg0: Array, _arg1: Params): string
+ }
+ /**
+ * BaseQueryBuilder provides a basic implementation of QueryBuilder.
+ */
+ interface BaseQueryBuilder {
+ }
+ interface newBaseQueryBuilder {
+ /**
+ * NewBaseQueryBuilder creates a new BaseQueryBuilder instance.
+ */
+ (db: DB): (BaseQueryBuilder)
+ }
+ interface BaseQueryBuilder {
+ /**
+ * DB returns the DB instance associated with the query builder.
+ */
+ db(): (DB)
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildSelect generates a SELECT clause from the given selected column names.
+ */
+ buildSelect(cols: Array, distinct: boolean, option: string): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildFrom generates a FROM clause from the given tables.
+ */
+ buildFrom(tables: Array): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildJoin generates a JOIN clause from the given join information.
+ */
+ buildJoin(joins: Array, params: Params): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildWhere generates a WHERE clause from the given expression.
+ */
+ buildWhere(e: Expression, params: Params): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildHaving generates a HAVING clause from the given expression.
+ */
+ buildHaving(e: Expression, params: Params): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildGroupBy generates a GROUP BY clause from the given group-by columns.
+ */
+ buildGroupBy(cols: Array): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses.
+ */
+ buildOrderByAndLimit(sql: string, cols: Array, limit: number, offset: number): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildUnion generates a UNION clause from the given union information.
+ */
+ buildUnion(unions: Array, params: Params): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildOrderBy generates the ORDER BY clause.
+ */
+ buildOrderBy(cols: Array): string
+ }
+ interface BaseQueryBuilder {
+ /**
+ * BuildLimit generates the LIMIT clause.
+ */
+ buildLimit(limit: number, offset: number): string
+ }
+ /**
+ * VarTypeError indicates a variable type error when trying to populating a variable with DB result.
+ */
+ interface VarTypeError extends String{}
+ interface VarTypeError {
+ /**
+ * Error returns the error message.
+ */
+ error(): string
+ }
+ /**
+ * NullStringMap is a map of sql.NullString that can be used to hold DB query result.
+ * The map keys correspond to the DB column names, while the map values are their corresponding column values.
+ */
+ interface NullStringMap extends _TygojaDict{}
+ /**
+ * Rows enhances sql.Rows by providing additional data query methods.
+ * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row.
+ */
+ type _sKlUpIl = sql.Rows
+ interface Rows extends _sKlUpIl {
+ }
+ interface Rows {
+ /**
+ * ScanMap populates the current row of data into a NullStringMap.
+ * Note that the NullStringMap must not be nil, or it will panic.
+ * The NullStringMap will be populated using column names as keys and their values as
+ * the corresponding element values.
+ */
+ scanMap(a: NullStringMap): void
+ }
+ interface Rows {
+ /**
+ * ScanStruct populates the current row of data into a struct.
+ * The struct must be given as a pointer.
+ *
+ * ScanStruct associates struct fields with DB table columns through a field mapping function.
+ * It populates a struct field with the data of its associated column.
+ * Note that only exported struct fields will be populated.
+ *
+ * By default, DefaultFieldMapFunc() is used to map struct fields to table columns.
+ * This function separates each word in a field name with an underscore and turns every letter into lower case.
+ * For example, "LastName" is mapped to "last_name", "MyID" is mapped to "my_id", and so on.
+ * To change the default behavior, set DB.FieldMapper with your custom mapping function.
+ * You may also set Query.FieldMapper to change the behavior for particular queries.
+ */
+ scanStruct(a: {
+ }): void
+ }
+ /**
+ * BuildHookFunc defines a callback function that is executed on Query creation.
+ */
+ interface BuildHookFunc {(q: Query): void }
+ /**
+ * SelectQuery represents a DB-agnostic SELECT query.
+ * It can be built into a DB-specific query by calling the Build() method.
+ */
+ interface SelectQuery {
+ /**
+ * FieldMapper maps struct field names to DB column names.
+ */
+ fieldMapper: FieldMapFunc
+ /**
+ * TableMapper maps structs to DB table names.
+ */
+ tableMapper: TableMapFunc
+ }
+ /**
+ * JoinInfo contains the specification for a JOIN clause.
+ */
+ interface JoinInfo {
+ join: string
+ table: string
+ on: Expression
+ }
+ /**
+ * UnionInfo contains the specification for a UNION clause.
+ */
+ interface UnionInfo {
+ all: boolean
+ query?: Query
+ }
+ interface newSelectQuery {
+ /**
+ * NewSelectQuery creates a new SelectQuery instance.
+ */
+ (builder: Builder, db: DB): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * WithBuildHook runs the provided hook function with the query created on Build().
+ */
+ withBuildHook(fn: BuildHookFunc): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Context returns the context associated with the query.
+ */
+ context(): context.Context
+ }
+ interface SelectQuery {
+ /**
+ * WithContext associates a context with the query.
+ */
+ withContext(ctx: context.Context): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * PreFragment sets SQL fragment that should be prepended before the select query (e.g. WITH clause).
+ */
+ preFragment(fragment: string): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * PostFragment sets SQL fragment that should be appended at the end of the select query.
+ */
+ postFragment(fragment: string): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Select specifies the columns to be selected.
+ * Column names will be automatically quoted.
+ */
+ select(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndSelect adds additional columns to be selected.
+ * Column names will be automatically quoted.
+ */
+ andSelect(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Distinct specifies whether to select columns distinctively.
+ * By default, distinct is false.
+ */
+ distinct(v: boolean): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * SelectOption specifies additional option that should be append to "SELECT".
+ */
+ selectOption(option: string): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * From specifies which tables to select from.
+ * Table names will be automatically quoted.
+ */
+ from(...tables: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Where specifies the WHERE condition.
+ */
+ where(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndWhere concatenates a new WHERE condition with the existing one (if any) using "AND".
+ */
+ andWhere(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * OrWhere concatenates a new WHERE condition with the existing one (if any) using "OR".
+ */
+ orWhere(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Join specifies a JOIN clause.
+ * The "typ" parameter specifies the JOIN type (e.g. "INNER JOIN", "LEFT JOIN").
+ */
+ join(typ: string, table: string, on: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * InnerJoin specifies an INNER JOIN clause.
+ * This is a shortcut method for Join.
+ */
+ innerJoin(table: string, on: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * LeftJoin specifies a LEFT JOIN clause.
+ * This is a shortcut method for Join.
+ */
+ leftJoin(table: string, on: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * RightJoin specifies a RIGHT JOIN clause.
+ * This is a shortcut method for Join.
+ */
+ rightJoin(table: string, on: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * OrderBy specifies the ORDER BY clause.
+ * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
+ */
+ orderBy(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndOrderBy appends additional columns to the existing ORDER BY clause.
+ * Column names will be properly quoted. A column name can contain "ASC" or "DESC" to indicate its ordering direction.
+ */
+ andOrderBy(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * GroupBy specifies the GROUP BY clause.
+ * Column names will be properly quoted.
+ */
+ groupBy(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndGroupBy appends additional columns to the existing GROUP BY clause.
+ * Column names will be properly quoted.
+ */
+ andGroupBy(...cols: string[]): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Having specifies the HAVING clause.
+ */
+ having(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndHaving concatenates a new HAVING condition with the existing one (if any) using "AND".
+ */
+ andHaving(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * OrHaving concatenates a new HAVING condition with the existing one (if any) using "OR".
+ */
+ orHaving(e: Expression): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Union specifies a UNION clause.
+ */
+ union(q: Query): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * UnionAll specifies a UNION ALL clause.
+ */
+ unionAll(q: Query): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Limit specifies the LIMIT clause.
+ * A negative limit means no limit.
+ */
+ limit(limit: number): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Offset specifies the OFFSET clause.
+ * A negative offset means no offset.
+ */
+ offset(offset: number): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Bind specifies the parameter values to be bound to the query.
+ */
+ bind(params: Params): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * AndBind appends additional parameters to be bound to the query.
+ */
+ andBind(params: Params): (SelectQuery)
+ }
+ interface SelectQuery {
+ /**
+ * Build builds the SELECT query and returns an executable Query object.
+ */
+ build(): (Query)
+ }
+ interface SelectQuery {
+ /**
+ * One executes the SELECT query and populates the first row of the result into the specified variable.
+ *
+ * If the query does not specify a "from" clause, the method will try to infer the name of the table
+ * to be selected from by calling getTableName() which will return either the variable type name
+ * or the TableName() method if the variable implements the TableModel interface.
+ *
+ * Note that when the query has no rows in the result set, an sql.ErrNoRows will be returned.
+ */
+ one(a: {
+ }): void
+ }
+ interface SelectQuery {
+ /**
+ * Model selects the row with the specified primary key and populates the model with the row data.
+ *
+ * The model variable should be a pointer to a struct. If the query does not specify a "from" clause,
+ * it will use the model struct to determine which table to select data from. It will also use the model
+ * to infer the name of the primary key column. Only simple primary key is supported. For composite primary keys,
+ * please use Where() to specify the filtering condition.
+ */
+ model(pk: {
+ }, model: {
+ }): void
+ }
+ interface SelectQuery {
+ /**
+ * All executes the SELECT query and populates all rows of the result into a slice.
+ *
+ * Note that the slice must be passed in as a pointer.
+ *
+ * If the query does not specify a "from" clause, the method will try to infer the name of the table
+ * to be selected from by calling getTableName() which will return either the type name of the slice elements
+ * or the TableName() method if the slice element implements the TableModel interface.
+ */
+ all(slice: {
+ }): void
+ }
+ interface SelectQuery {
+ /**
+ * Rows builds and executes the SELECT query and returns a Rows object for data retrieval purpose.
+ * This is a shortcut to SelectQuery.Build().Rows()
+ */
+ rows(): (Rows)
+ }
+ interface SelectQuery {
+ /**
+ * Row builds and executes the SELECT query and populates the first row of the result into the specified variables.
+ * This is a shortcut to SelectQuery.Build().Row()
+ */
+ row(...a: {
+ }[]): void
+ }
+ interface SelectQuery {
+ /**
+ * Column builds and executes the SELECT statement and populates the first column of the result into a slice.
+ * Note that the parameter must be a pointer to a slice.
+ * This is a shortcut to SelectQuery.Build().Column()
+ */
+ column(a: {
+ }): void
+ }
+ /**
+ * QueryInfo represents a debug/info struct with exported SelectQuery fields.
+ */
+ interface QueryInfo {
+ preFragment: string
+ postFragment: string
+ builder: Builder
+ selects: Array
+ distinct: boolean
+ selectOption: string
+ from: Array
+ where: Expression
+ join: Array
+ orderBy: Array
+ groupBy: Array
+ having: Expression
+ union: Array
+ limit: number
+ offset: number
+ params: Params
+ context: context.Context
+ buildHook: BuildHookFunc
+ }
+ interface SelectQuery {
+ /**
+ * Info exports common SelectQuery fields allowing to inspect the
+ * current select query options.
+ */
+ info(): (QueryInfo)
+ }
+ /**
+ * FieldMapFunc converts a struct field name into a DB column name.
+ */
+ interface FieldMapFunc {(_arg0: string): string }
+ /**
+ * TableMapFunc converts a sample struct into a DB table name.
+ */
+ interface TableMapFunc {(a: {
+ }): string }
+ interface structInfo {
+ }
+ type _sjTuviq = structInfo
+ interface structValue extends _sjTuviq {
+ }
+ interface fieldInfo {
+ }
+ interface structInfoMapKey {
+ }
+ /**
+ * PostScanner is an optional interface used by ScanStruct.
+ */
+ interface PostScanner {
+ [key:string]: any;
+ /**
+ * PostScan executes right after the struct has been populated
+ * with the DB values, allowing you to further normalize or validate
+ * the loaded data.
+ */
+ postScan(): void
+ }
+ interface defaultFieldMapFunc {
+ /**
+ * DefaultFieldMapFunc maps a field name to a DB column name.
+ * The mapping rule set by this method is that words in a field name will be separated by underscores
+ * and the name will be turned into lower case. For example, "FirstName" maps to "first_name", and "MyID" becomes "my_id".
+ * See DB.FieldMapper for more details.
+ */
+ (f: string): string
+ }
+ interface getTableName {
+ /**
+ * GetTableName implements the default way of determining the table name corresponding to the given model struct
+ * or slice of structs. To get the actual table name for a model, you should use DB.TableMapFunc() instead.
+ * Do not call this method in a model's TableName() method because it will cause infinite loop.
+ */
+ (a: {
+ }): string
+ }
+ /**
+ * Tx enhances sql.Tx with additional querying methods.
+ */
+ type _sxeFDlS = Builder
+ interface Tx extends _sxeFDlS {
+ }
+ interface Tx {
+ /**
+ * Commit commits the transaction.
+ */
+ commit(): void
+ }
+ interface Tx {
+ /**
+ * Rollback aborts the transaction.
+ */
+ rollback(): void
+ }
+}
+
+namespace filesystem {
+ /**
+ * FileReader defines an interface for a file resource reader.
+ */
+ interface FileReader {
+ [key:string]: any;
+ open(): io.ReadSeekCloser
+ }
+ /**
+ * File defines a single file [io.ReadSeekCloser] resource.
+ *
+ * The file could be from a local path, multipart/form-data header, etc.
+ */
+ interface File {
+ reader: FileReader
+ name: string
+ originalName: string
+ size: number
+ }
+ interface File {
+ /**
+ * AsMap implements [core.mapExtractor] and returns a value suitable
+ * to be used in an API rule expression.
+ */
+ asMap(): _TygojaDict
+ }
+ interface newFileFromPath {
+ /**
+ * NewFileFromPath creates a new File instance from the provided local file path.
+ */
+ (path: string): (File)
+ }
+ interface newFileFromBytes {
+ /**
+ * NewFileFromBytes creates a new File instance from the provided byte slice.
+ */
+ (b: string|Array, name: string): (File)
+ }
+ interface newFileFromMultipart {
+ /**
+ * NewFileFromMultipart creates a new File from the provided multipart header.
+ */
+ (mh: multipart.FileHeader): (File)
+ }
+ interface newFileFromURL {
+ /**
+ * NewFileFromURL creates a new File from the provided url by
+ * downloading the resource and loading it as a BytesReader.
+ *
+ * Example
+ *
+ * ```
+ * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ * defer cancel()
+ *
+ * file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png")
+ * ```
+ */
+ (ctx: context.Context, url: string): (File)
+ }
+ /**
+ * MultipartReader defines a FileReader from [multipart.FileHeader].
+ */
+ interface MultipartReader {
+ header?: multipart.FileHeader
+ }
+ interface MultipartReader {
+ /**
+ * Open implements the [filesystem.FileReader] interface.
+ */
+ open(): io.ReadSeekCloser
+ }
+ /**
+ * PathReader defines a FileReader from a local file path.
+ */
+ interface PathReader {
+ path: string
+ }
+ interface PathReader {
+ /**
+ * Open implements the [filesystem.FileReader] interface.
+ */
+ open(): io.ReadSeekCloser
+ }
+ /**
+ * BytesReader defines a FileReader from bytes content.
+ */
+ interface BytesReader {
+ bytes: string|Array
+ }
+ interface BytesReader {
+ /**
+ * Open implements the [filesystem.FileReader] interface.
+ */
+ open(): io.ReadSeekCloser
+ }
+ type _ssuzizV = bytes.Reader
+ interface bytesReadSeekCloser extends _ssuzizV {
+ }
+ interface bytesReadSeekCloser {
+ /**
+ * Close implements the [io.ReadSeekCloser] interface.
+ */
+ close(): void
+ }
+ /**
+ * openFuncAsReader defines a FileReader from a bare Open function.
+ */
+ interface openFuncAsReader {(): io.ReadSeekCloser }
+ interface openFuncAsReader {
+ /**
+ * Open implements the [filesystem.FileReader] interface.
+ */
+ open(): io.ReadSeekCloser
+ }
+ interface System {
+ }
+ interface newS3 {
+ /**
+ * NewS3 initializes an S3 filesystem instance.
+ *
+ * NB! Make sure to call `Close()` after you are done working with it.
+ */
+ (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System)
+ }
+ interface newLocal {
+ /**
+ * NewLocal initializes a new local filesystem instance.
+ *
+ * NB! Make sure to call `Close()` after you are done working with it.
+ */
+ (dirPath: string): (System)
+ }
+ interface System {
+ /**
+ * SetContext assigns the specified context to the current filesystem.
+ */
+ setContext(ctx: context.Context): void
+ }
+ interface System {
+ /**
+ * Close releases any resources used for the related filesystem.
+ */
+ close(): void
+ }
+ interface System {
+ /**
+ * Exists checks if file with fileKey path exists or not.
+ */
+ exists(fileKey: string): boolean
+ }
+ interface System {
+ /**
+ * Attributes returns the attributes for the file with fileKey path.
+ *
+ * If the file doesn't exist it returns ErrNotFound.
+ */
+ attributes(fileKey: string): (blob.Attributes)
+ }
+ interface System {
+ /**
+ * GetReader returns a file content reader for the given fileKey.
+ *
+ * NB! Make sure to call Close() on the file after you are done working with it.
+ *
+ * If the file doesn't exist returns ErrNotFound.
+ */
+ getReader(fileKey: string): (blob.Reader)
+ }
+ interface System {
+ /**
+ * Deprecated: Please use GetReader(fileKey) instead.
+ */
+ getFile(fileKey: string): (blob.Reader)
+ }
+ interface System {
+ /**
+ * GetReuploadableFile constructs a new reuploadable File value
+ * from the associated fileKey blob.Reader.
+ *
+ * If preserveName is false then the returned File.Name will have
+ * a new randomly generated suffix, otherwise it will reuse the original one.
+ *
+ * This method could be useful in case you want to clone an existing
+ * Record file and assign it to a new Record (e.g. in a Record duplicate action).
+ *
+ * If you simply want to copy an existing file to a new location you
+ * could check the Copy(srcKey, dstKey) method.
+ */
+ getReuploadableFile(fileKey: string, preserveName: boolean): (File)
+ }
+ interface System {
+ /**
+ * Copy copies the file stored at srcKey to dstKey.
+ *
+ * If srcKey file doesn't exist, it returns ErrNotFound.
+ *
+ * If dstKey file already exists, it is overwritten.
+ */
+ copy(srcKey: string, dstKey: string): void
+ }
+ interface System {
+ /**
+ * List returns a flat list with info for all files under the specified prefix.
+ */
+ list(prefix: string): Array<(blob.ListObject | undefined)>
+ }
+ interface System {
+ /**
+ * Upload writes content into the fileKey location.
+ */
+ upload(content: string|Array, fileKey: string): void
+ }
+ interface System {
+ /**
+ * UploadFile uploads the provided File to the fileKey location.
+ */
+ uploadFile(file: File, fileKey: string): void
+ }
+ interface System {
+ /**
+ * UploadMultipart uploads the provided multipart file to the fileKey location.
+ */
+ uploadMultipart(fh: multipart.FileHeader, fileKey: string): void
+ }
+ interface System {
+ /**
+ * Delete deletes stored file at fileKey location.
+ *
+ * If the file doesn't exist returns ErrNotFound.
+ */
+ delete(fileKey: string): void
+ }
+ interface System {
+ /**
+ * DeletePrefix deletes everything starting with the specified prefix.
+ *
+ * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_").
+ */
+ deletePrefix(prefix: string): Array
+ }
+ interface System {
+ /**
+ * IsEmptyDir checks if the provided dir prefix doesn't have any files.
+ *
+ * A trailing slash will be appended to a non-empty dir string argument
+ * to ensure that the checked prefix is a "directory".
+ *
+ * Returns "false" in case the dir has at least one file, otherwise - "true".
+ */
+ isEmptyDir(dir: string): boolean
+ }
+ interface System {
+ /**
+ * Serve serves the file at fileKey location to an HTTP response.
+ *
+ * If the `download` query parameter is used the file will be always served for
+ * download no matter of its type (aka. with "Content-Disposition: attachment").
+ *
+ * Internally this method uses [http.ServeContent] so Range requests,
+ * If-Match, If-Unmodified-Since, etc. headers are handled transparently.
+ */
+ serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void
+ }
+ interface System {
+ /**
+ * CreateThumb creates a new thumb image for the file at originalKey location.
+ * The new thumb file is stored at thumbKey location.
+ *
+ * thumbSize is in the format:
+ * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio
+ * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio
+ * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center)
+ * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top)
+ * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom)
+ * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping)
+ */
+ createThumb(originalKey: string, thumbKey: string, thumbSize: string): void
+ }
+}
+
+/**
+ * Package exec runs external commands. It wraps os.StartProcess to make it
+ * easier to remap stdin and stdout, connect I/O with pipes, and do other
+ * adjustments.
+ *
+ * Unlike the "system" library call from C and other languages, the
+ * os/exec package intentionally does not invoke the system shell and
+ * does not expand any glob patterns or handle other expansions,
+ * pipelines, or redirections typically done by shells. The package
+ * behaves more like C's "exec" family of functions. To expand glob
+ * patterns, either call the shell directly, taking care to escape any
+ * dangerous input, or use the [path/filepath] package's Glob function.
+ * To expand environment variables, use package os's ExpandEnv.
+ *
+ * Note that the examples in this package assume a Unix system.
+ * They may not run on Windows, and they do not run in the Go Playground
+ * used by golang.org and godoc.org.
+ *
+ * # Executables in the current directory
+ *
+ * The functions [Command] and [LookPath] look for a program
+ * in the directories listed in the current path, following the
+ * conventions of the host operating system.
+ * Operating systems have for decades included the current
+ * directory in this search, sometimes implicitly and sometimes
+ * configured explicitly that way by default.
+ * Modern practice is that including the current directory
+ * is usually unexpected and often leads to security problems.
+ *
+ * To avoid those security problems, as of Go 1.19, this package will not resolve a program
+ * using an implicit or explicit path entry relative to the current directory.
+ * That is, if you run [LookPath]("go"), it will not successfully return
+ * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
+ * Instead, if the usual path algorithms would result in that answer,
+ * these functions return an error err satisfying [errors.Is](err, [ErrDot]).
+ *
+ * For example, consider these two program snippets:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * These will not find and run ./prog or .\prog.exe,
+ * no matter how the current path is configured.
+ *
+ * Code that always wants to run a program from the current directory
+ * can be rewritten to say "./prog" instead of "prog".
+ *
+ * Code that insists on including results from relative path entries
+ * can instead override the error using an errors.Is check:
+ *
+ * ```
+ * path, err := exec.LookPath("prog")
+ * if errors.Is(err, exec.ErrDot) {
+ * err = nil
+ * }
+ * if err != nil {
+ * log.Fatal(err)
+ * }
+ * use(path)
+ * ```
+ *
+ * and
+ *
+ * ```
+ * cmd := exec.Command("prog")
+ * if errors.Is(cmd.Err, exec.ErrDot) {
+ * cmd.Err = nil
+ * }
+ * if err := cmd.Run(); err != nil {
+ * log.Fatal(err)
+ * }
+ * ```
+ *
+ * Setting the environment variable GODEBUG=execerrdot=0
+ * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
+ * behavior for programs that are unable to apply more targeted fixes.
+ * A future version of Go may remove support for this variable.
+ *
+ * Before adding such overrides, make sure you understand the
+ * security implications of doing so.
+ * See https://go.dev/blog/path-security for more information.
+ */
+namespace exec {
+ interface command {
+  /**
+   * Command returns the [Cmd] struct to execute the named program with
+   * the given arguments.
+   *
+   * It sets only the Path and Args in the returned structure.
+   *
+   * If name contains no path separators, Command uses [LookPath] to
+   * resolve name to a complete path if possible. Otherwise it uses name
+   * directly as Path.
+   *
+   * The returned Cmd's Args field is constructed from the command name
+   * followed by the elements of arg, so arg should not include the
+   * command name itself. For example, Command("echo", "hello").
+   * Args[0] is always name, not the possibly resolved Path.
+   *
+   * If the path lookup fails (for example with [ErrDot], see the package
+   * notes above), the error is reported through the returned Cmd's Err
+   * field and surfaces when the command is run.
+   *
+   * On Windows, processes receive the whole command line as a single string
+   * and do their own parsing. Command combines and quotes Args into a command
+   * line string with an algorithm compatible with applications using
+   * CommandLineToArgvW (which is the most common way). Notable exceptions are
+   * msiexec.exe and cmd.exe (and thus, all batch files), which have a different
+   * unquoting algorithm. In these or other similar cases, you can do the
+   * quoting yourself and provide the full command line in SysProcAttr.CmdLine,
+   * leaving Args empty.
+   */
+  (name: string, ...arg: string[]): (Cmd)
+ }
+}
+
+/**
+ * Package core is the backbone of PocketBase.
+ *
+ * It defines the main PocketBase App interface and its base implementation.
+ */
+namespace core {
+ /**
+ * App defines the main PocketBase app interface.
+ *
+ * Note that the interface is not intended to be implemented manually by users
+ * and instead they should use core.BaseApp (either directly or as embedded field in a custom struct).
+ *
+ * This interface exists to make testing easier and to allow users to
+ * create common and pluggable helpers and methods that don't rely
+ * on a specific wrapped app struct (hence the large interface size).
+ */
+ interface App {
+ [key:string]: any;
+ /**
+ * UnsafeWithoutHooks returns a shallow copy of the current app WITHOUT any registered hooks.
+ *
+ * NB! Note that using the returned app instance may cause data integrity errors
+ * since the Record validations and data normalizations (including files uploads)
+ * rely on the app hooks to work.
+ */
+ unsafeWithoutHooks(): App
+ /**
+ * Logger returns the default app logger.
+ *
+ * If the application is not bootstrapped yet, fallbacks to slog.Default().
+ */
+ logger(): (slog.Logger)
+ /**
+ * IsBootstrapped checks if the application was initialized
+ * (aka. whether Bootstrap() was called).
+ */
+ isBootstrapped(): boolean
+ /**
+ * IsTransactional checks if the current app instance is part of a transaction.
+ */
+ isTransactional(): boolean
+ /**
+ * TxInfo returns the transaction associated with the current app instance (if any).
+ *
+ * Could be used if you want to execute indirectly a function after
+ * the related app transaction completes using `app.TxInfo().OnAfterFunc(callback)`.
+ */
+ txInfo(): (TxAppInfo)
+ /**
+ * Bootstrap initializes the application
+ * (aka. create data dir, open db connections, load settings, etc.).
+ *
+ * It will call ResetBootstrapState() if the application was already bootstrapped.
+ */
+ bootstrap(): void
+ /**
+ * ResetBootstrapState releases the initialized core app resources
+ * (closing db connections, stopping cron ticker, etc.).
+ */
+ resetBootstrapState(): void
+ /**
+ * DataDir returns the app data directory path.
+ */
+ dataDir(): string
+ /**
+ * EncryptionEnv returns the name of the app secret env key
+ * (currently used primarily for optional settings encryption but this may change in the future).
+ */
+ encryptionEnv(): string
+ /**
+ * IsDev returns whether the app is in dev mode.
+ *
+ * When enabled logs, executed sql statements, etc. are printed to the stderr.
+ */
+ isDev(): boolean
+ /**
+ * Settings returns the loaded app settings.
+ */
+ settings(): (Settings)
+ /**
+ * Store returns the app runtime store.
+ */
+ store(): (store.Store)
+ /**
+ * Cron returns the app cron instance.
+ */
+ cron(): (cron.Cron)
+ /**
+ * SubscriptionsBroker returns the app realtime subscriptions broker instance.
+ */
+ subscriptionsBroker(): (subscriptions.Broker)
+ /**
+ * NewMailClient creates and returns a new SMTP or Sendmail client
+ * based on the current app settings.
+ */
+ newMailClient(): mailer.Mailer
+ /**
+ * NewFilesystem creates a new local or S3 filesystem instance
+ * for managing regular app files (ex. record uploads)
+ * based on the current app settings.
+ *
+ * NB! Make sure to call Close() on the returned result
+ * after you are done working with it.
+ */
+ newFilesystem(): (filesystem.System)
+ /**
+ * NewBackupsFilesystem creates a new local or S3 filesystem instance
+ * for managing app backups based on the current app settings.
+ *
+ * NB! Make sure to call Close() on the returned result
+ * after you are done working with it.
+ */
+ newBackupsFilesystem(): (filesystem.System)
+ /**
+ * ReloadSettings reinitializes and reloads the stored application settings.
+ */
+ reloadSettings(): void
+ /**
+ * CreateBackup creates a new backup of the current app pb_data directory.
+ *
+ * Backups can be stored on S3 if it is configured in app.Settings().Backups.
+ *
+ * Please refer to the godoc of the specific CoreApp implementation
+ * for details on the backup procedures.
+ */
+ createBackup(ctx: context.Context, name: string): void
+ /**
+ * RestoreBackup restores the backup with the specified name and restarts
+ * the current running application process.
+ *
+ * To safely perform the restore it is recommended to have free disk space
+ * for at least 2x the size of the restored pb_data backup.
+ *
+ * Please refer to the godoc of the specific CoreApp implementation
+ * for details on the restore procedures.
+ *
+ * NB! This feature is experimental and currently is expected to work only on UNIX based systems.
+ */
+ restoreBackup(ctx: context.Context, name: string): void
+ /**
+ * Restart restarts (aka. replaces) the current running application process.
+ *
+ * NB! It relies on execve which is supported only on UNIX based systems.
+ */
+ restart(): void
+ /**
+ * RunSystemMigrations applies all new migrations registered in the [core.SystemMigrations] list.
+ */
+ runSystemMigrations(): void
+ /**
+ * RunAppMigrations applies all new migrations registered in the [CoreAppMigrations] list.
+ */
+ runAppMigrations(): void
+ /**
+ * RunAllMigrations applies all system and app migrations
+ * (aka. from both [core.SystemMigrations] and [CoreAppMigrations]).
+ */
+ runAllMigrations(): void
+ /**
+ * DB returns the default app data.db builder instance.
+ *
+ * To minimize SQLITE_BUSY errors, it automatically routes the
+ * SELECT queries to the underlying concurrent db pool and everything else
+ * to the nonconcurrent one.
+ *
+ * For finer control over the used connection pools you can
+ * call directly ConcurrentDB() or NonconcurrentDB().
+ */
+ db(): dbx.Builder
+ /**
+ * ConcurrentDB returns the concurrent app data.db builder instance.
+ *
+ * This method is used mainly internally for executing db read
+ * operations in a concurrent/non-blocking manner.
+ *
+ * Most users should use simply DB() as it will automatically
+ * route the query execution to ConcurrentDB() or NonconcurrentDB().
+ *
+ * In a transaction the ConcurrentDB() and NonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ concurrentDB(): dbx.Builder
+ /**
+ * NonconcurrentDB returns the nonconcurrent app data.db builder instance.
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other queries queue up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * Most users should use simply DB() as it will automatically
+ * route the query execution to ConcurrentDB() or NonconcurrentDB().
+ *
+ * In a transaction the ConcurrentDB() and NonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ nonconcurrentDB(): dbx.Builder
+ /**
+ * AuxDB returns the app auxiliary.db builder instance.
+ *
+ * To minimize SQLITE_BUSY errors, it automatically routes the
+ * SELECT queries to the underlying concurrent db pool and everything else
+ * to the nonconcurrent one.
+ *
+ * For finer control over the used connection pools you can
+ * call directly AuxConcurrentDB() or AuxNonconcurrentDB().
+ */
+ auxDB(): dbx.Builder
+ /**
+ * AuxConcurrentDB returns the concurrent app auxiliary.db builder instance.
+ *
+ * This method is used mainly internally for executing db read
+ * operations in a concurrent/non-blocking manner.
+ *
+ * Most users should use simply AuxDB() as it will automatically
+ * route the query execution to AuxConcurrentDB() or AuxNonconcurrentDB().
+ *
+ * In a transaction the AuxConcurrentDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ auxConcurrentDB(): dbx.Builder
+ /**
+ * AuxNonconcurrentDB returns the nonconcurrent app auxiliary.db builder instance.
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other queries queue up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * Most users should use simply AuxDB() as it will automatically
+ * route the query execution to AuxConcurrentDB() or AuxNonconcurrentDB().
+ *
+ * In a transaction the AuxConcurrentDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ auxNonconcurrentDB(): dbx.Builder
+ /**
+ * HasTable checks if a table (or view) with the provided name exists (case insensitive)
+ * in the data.db.
+ */
+ hasTable(tableName: string): boolean
+ /**
+ * AuxHasTable checks if a table (or view) with the provided name exists (case insensitive)
+ * in the auxiliary.db.
+ */
+ auxHasTable(tableName: string): boolean
+ /**
+ * TableColumns returns all column names of a single table by its name.
+ */
+ tableColumns(tableName: string): Array
+ /**
+ * TableInfo returns the "table_info" pragma result for the specified table.
+ */
+ tableInfo(tableName: string): Array<(TableInfoRow | undefined)>
+ /**
+ * TableIndexes returns a name grouped map with all non empty indexes of the specified table.
+ *
+ * Note: This method doesn't return an error on nonexisting table.
+ */
+ tableIndexes(tableName: string): _TygojaDict
+ /**
+ * DeleteTable drops the specified table.
+ *
+ * This method is a no-op if a table with the provided name doesn't exist.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "tableName" argument must come only from trusted input!
+ */
+ deleteTable(tableName: string): void
+ /**
+ * DeleteView drops the specified view name.
+ *
+ * This method is a no-op if a view with the provided name doesn't exist.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "name" argument must come only from trusted input!
+ */
+ deleteView(name: string): void
+ /**
+ * SaveView creates (or updates already existing) persistent SQL view.
+ *
+ * NB! Be aware that this method is vulnerable to SQL injection and the
+ * "selectQuery" argument must come only from trusted input!
+ */
+ saveView(name: string, selectQuery: string): void
+ /**
+ * CreateViewFields creates a new FieldsList from the provided select query.
+ *
+ * There are some caveats:
+ * - The select query must have an "id" column.
+ * - Wildcard ("*") columns are not supported to avoid accidentally leaking sensitive data.
+ */
+ createViewFields(selectQuery: string): FieldsList
+ /**
+ * FindRecordByViewFile returns the original Record of the provided view collection file.
+ */
+ findRecordByViewFile(viewCollectionModelOrIdentifier: any, fileFieldName: string, filename: string): (Record)
+ /**
+ * Vacuum executes VACUUM on the data.db in order to reclaim unused data db disk space.
+ */
+ vacuum(): void
+ /**
+ * AuxVacuum executes VACUUM on the auxiliary.db in order to reclaim unused auxiliary db disk space.
+ */
+ auxVacuum(): void
+ /**
+ * ModelQuery creates a new preconfigured select data.db query with preset
+ * SELECT, FROM and other common fields based on the provided model.
+ */
+ modelQuery(model: Model): (dbx.SelectQuery)
+ /**
+ * AuxModelQuery creates a new preconfigured select auxiliary.db query with preset
+ * SELECT, FROM and other common fields based on the provided model.
+ */
+ auxModelQuery(model: Model): (dbx.SelectQuery)
+ /**
+ * Delete deletes the specified model from the regular app database.
+ */
+ delete(model: Model): void
+ /**
+ * Delete deletes the specified model from the regular app database
+ * (the context could be used to limit the query execution).
+ */
+ deleteWithContext(ctx: context.Context, model: Model): void
+ /**
+ * AuxDelete deletes the specified model from the auxiliary database.
+ */
+ auxDelete(model: Model): void
+ /**
+ * AuxDeleteWithContext deletes the specified model from the auxiliary database
+ * (the context could be used to limit the query execution).
+ */
+ auxDeleteWithContext(ctx: context.Context, model: Model): void
+ /**
+ * Save validates and saves the specified model into the regular app database.
+ *
+ * If you don't want to run validations, use [App.SaveNoValidate()].
+ */
+ save(model: Model): void
+ /**
+ * SaveWithContext is the same as [App.Save()] but allows specifying a context to limit the db execution.
+ *
+ * If you don't want to run validations, use [App.SaveNoValidateWithContext()].
+ */
+ saveWithContext(ctx: context.Context, model: Model): void
+ /**
+ * SaveNoValidate saves the specified model into the regular app database without performing validations.
+ *
+ * If you want to also run validations before persisting, use [App.Save()].
+ */
+ saveNoValidate(model: Model): void
+ /**
+ * SaveNoValidateWithContext is the same as [App.SaveNoValidate()]
+ * but allows specifying a context to limit the db execution.
+ *
+ * If you want to also run validations before persisting, use [App.SaveWithContext()].
+ */
+ saveNoValidateWithContext(ctx: context.Context, model: Model): void
+ /**
+ * AuxSave validates and saves the specified model into the auxiliary app database.
+ *
+ * If you don't want to run validations, use [App.AuxSaveNoValidate()].
+ */
+ auxSave(model: Model): void
+ /**
+ * AuxSaveWithContext is the same as [App.AuxSave()] but allows specifying a context to limit the db execution.
+ *
+ * If you don't want to run validations, use [App.AuxSaveNoValidateWithContext()].
+ */
+ auxSaveWithContext(ctx: context.Context, model: Model): void
+ /**
+ * AuxSaveNoValidate saves the specified model into the auxiliary app database without performing validations.
+ *
+ * If you want to also run validations before persisting, use [App.AuxSave()].
+ */
+ auxSaveNoValidate(model: Model): void
+ /**
+ * AuxSaveNoValidateWithContext is the same as [App.AuxSaveNoValidate()]
+ * but allows specifying a context to limit the db execution.
+ *
+ * If you want to also run validations before persisting, use [App.AuxSaveWithContext()].
+ */
+ auxSaveNoValidateWithContext(ctx: context.Context, model: Model): void
+ /**
+ * Validate triggers the OnModelValidate hook for the specified model.
+ */
+ validate(model: Model): void
+ /**
+ * ValidateWithContext is the same as Validate but allows specifying the ModelEvent context.
+ */
+ validateWithContext(ctx: context.Context, model: Model): void
+ /**
+ * RunInTransaction wraps fn into a transaction for the regular app database.
+ *
+ * It is safe to nest RunInTransaction calls as long as you use the callback's txApp.
+ */
+ runInTransaction(fn: (txApp: App) => void): void
+ /**
+ * AuxRunInTransaction wraps fn into a transaction for the auxiliary app database.
+ *
+ * It is safe to nest RunInTransaction calls as long as you use the callback's txApp.
+ */
+ auxRunInTransaction(fn: (txApp: App) => void): void
+ /**
+ * LogQuery returns a new Log select query.
+ */
+ logQuery(): (dbx.SelectQuery)
+ /**
+ * FindLogById finds a single Log entry by its id.
+ */
+ findLogById(id: string): (Log)
+ /**
+ * LogsStats returns hourly grouped logs statistics.
+ */
+ logsStats(expr: dbx.Expression): Array<(LogsStatsItem | undefined)>
+ /**
+ * DeleteOldLogs deletes all logs that are created before createdBefore.
+ */
+ deleteOldLogs(createdBefore: time.Time): void
+ /**
+ * CollectionQuery returns a new Collection select query.
+ */
+ collectionQuery(): (dbx.SelectQuery)
+ /**
+ * FindAllCollections finds all collections by the given type(s).
+ *
+ * If collectionTypes is not set, it returns all collections.
+ *
+ * Example:
+ *
+ * ```
+ * app.FindAllCollections() // all collections
+ * app.FindAllCollections("auth", "view") // only auth and view collections
+ * ```
+ */
+ findAllCollections(...collectionTypes: string[]): Array<(Collection | undefined)>
+ /**
+ * ReloadCachedCollections fetches all collections and caches them into the app store.
+ */
+ reloadCachedCollections(): void
+ /**
+ * FindCollectionByNameOrId finds a single collection by its name (case insensitive) or id.
+ */
+ findCollectionByNameOrId(nameOrId: string): (Collection)
+ /**
+ * FindCachedCollectionByNameOrId is similar to [App.FindCollectionByNameOrId]
+ * but retrieves the Collection from the app cache instead of making a db call.
+ *
+ * NB! This method is suitable for read-only Collection operations.
+ *
+ * Returns [sql.ErrNoRows] if no Collection is found for consistency
+ * with the [App.FindCollectionByNameOrId] method.
+ *
+ * If you plan making changes to the returned Collection model,
+ * use [App.FindCollectionByNameOrId] instead.
+ *
+ * Caveats:
+ *
+ * ```
+ * - The returned Collection should be used only for read-only operations.
+ * Avoid directly modifying the returned cached Collection as it will affect
+ * the global cached value even if you don't persist the changes in the database!
+ * - If you are updating a Collection in a transaction and then call this method before commit,
+ * it'll return the cached Collection state and not the one from the uncommitted transaction.
+ * - The cache is automatically updated on collections db change (create/update/delete).
+ * To manually reload the cache you can call [App.ReloadCachedCollections]
+ * ```
+ */
+ findCachedCollectionByNameOrId(nameOrId: string): (Collection)
+ /**
+ * FindCollectionReferences returns information for all relation
+ * fields referencing the provided collection.
+ *
+ * If the provided collection has reference to itself then it will be
+ * also included in the result. To exclude it, pass the collection id
+ * as the excludeIds argument.
+ */
+ findCollectionReferences(collection: Collection, ...excludeIds: string[]): _TygojaDict
+ /**
+ * FindCachedCollectionReferences is similar to [App.FindCollectionReferences]
+ * but retrieves the Collection from the app cache instead of making a db call.
+ *
+ * NB! This method is suitable for read-only Collection operations.
+ *
+ * If you plan making changes to the returned Collection model,
+ * use [App.FindCollectionReferences] instead.
+ *
+ * Caveats:
+ *
+ * ```
+ * - The returned Collection should be used only for read-only operations.
+ * Avoid directly modifying the returned cached Collection as it will affect
+ * the global cached value even if you don't persist the changes in the database!
+ * - If you are updating a Collection in a transaction and then call this method before commit,
+ * it'll return the cached Collection state and not the one from the uncommitted transaction.
+ * - The cache is automatically updated on collections db change (create/update/delete).
+ * To manually reload the cache you can call [App.ReloadCachedCollections].
+ * ```
+ */
+ findCachedCollectionReferences(collection: Collection, ...excludeIds: string[]): _TygojaDict
+ /**
+ * IsCollectionNameUnique checks that there is no existing collection
+ * with the provided name (case insensitive!).
+ *
+ * Note: case insensitive check because the name is used also as
+ * table name for the records.
+ */
+ isCollectionNameUnique(name: string, ...excludeIds: string[]): boolean
+ /**
+ * TruncateCollection deletes all records associated with the provided collection.
+ *
+ * The truncate operation is executed in a single transaction,
+ * aka. either everything is deleted or none.
+ *
+ * Note that this method will also trigger the records related
+ * cascade and file delete actions.
+ */
+ truncateCollection(collection: Collection): void
+ /**
+ * ImportCollections imports the provided collections data in a single transaction.
+ *
+ * For existing matching collections, the imported data is unmarshaled on top of the existing model.
+ *
+ * NB! If deleteMissing is true, ALL NON-SYSTEM COLLECTIONS AND SCHEMA FIELDS,
+ * that are not present in the imported configuration, WILL BE DELETED
+ * (this includes their related records data).
+ */
+ importCollections(toImport: Array<_TygojaDict>, deleteMissing: boolean): void
+ /**
+ * ImportCollectionsByMarshaledJSON is the same as [ImportCollections]
+ * but accept marshaled json array as import data (usually used for the autogenerated snapshots).
+ */
+ importCollectionsByMarshaledJSON(rawSliceOfMaps: string|Array, deleteMissing: boolean): void
+ /**
+ * SyncRecordTableSchema compares the two provided collections
+ * and applies the necessary related record table changes.
+ *
+ * If oldCollection is null, then only newCollection is used to create the record table.
+ *
+ * This method is automatically invoked as part of a collection create/update/delete operation.
+ */
+ syncRecordTableSchema(newCollection: Collection, oldCollection: Collection): void
+ /**
+ * FindAllExternalAuthsByRecord returns all ExternalAuth models
+ * linked to the provided auth record.
+ */
+ findAllExternalAuthsByRecord(authRecord: Record): Array<(ExternalAuth | undefined)>
+ /**
+ * FindAllExternalAuthsByCollection returns all ExternalAuth models
+ * linked to the provided auth collection.
+ */
+ findAllExternalAuthsByCollection(collection: Collection): Array<(ExternalAuth | undefined)>
+ /**
+ * FindFirstExternalAuthByExpr returns the first available (the most recent created)
+ * ExternalAuth model that satisfies the non-nil expression.
+ */
+ findFirstExternalAuthByExpr(expr: dbx.Expression): (ExternalAuth)
+ /**
+ * FindAllMFAsByRecord returns all MFA models linked to the provided auth record.
+ */
+ findAllMFAsByRecord(authRecord: Record): Array<(MFA | undefined)>
+ /**
+ * FindAllMFAsByCollection returns all MFA models linked to the provided collection.
+ */
+ findAllMFAsByCollection(collection: Collection): Array<(MFA | undefined)>
+ /**
+ * FindMFAById returns a single MFA model by its id.
+ */
+ findMFAById(id: string): (MFA)
+ /**
+ * DeleteAllMFAsByRecord deletes all MFA models associated with the provided record.
+ *
+ * Returns a combined error with the failed deletes.
+ */
+ deleteAllMFAsByRecord(authRecord: Record): void
+ /**
+ * DeleteExpiredMFAs deletes the expired MFAs for all auth collections.
+ */
+ deleteExpiredMFAs(): void
+ /**
+ * FindAllOTPsByRecord returns all OTP models linked to the provided auth record.
+ */
+ findAllOTPsByRecord(authRecord: Record): Array<(OTP | undefined)>
+ /**
+ * FindAllOTPsByCollection returns all OTP models linked to the provided collection.
+ */
+ findAllOTPsByCollection(collection: Collection): Array<(OTP | undefined)>
+ /**
+ * FindOTPById returns a single OTP model by its id.
+ */
+ findOTPById(id: string): (OTP)
+ /**
+ * DeleteAllOTPsByRecord deletes all OTP models associated with the provided record.
+ *
+ * Returns a combined error with the failed deletes.
+ */
+ deleteAllOTPsByRecord(authRecord: Record): void
+ /**
+ * DeleteExpiredOTPs deletes the expired OTPs for all auth collections.
+ */
+ deleteExpiredOTPs(): void
+ /**
+ * FindAllAuthOriginsByRecord returns all AuthOrigin models linked to the provided auth record (in DESC order).
+ */
+ findAllAuthOriginsByRecord(authRecord: Record): Array<(AuthOrigin | undefined)>
+ /**
+ * FindAllAuthOriginsByCollection returns all AuthOrigin models linked to the provided collection (in DESC order).
+ */
+ findAllAuthOriginsByCollection(collection: Collection): Array<(AuthOrigin | undefined)>
+ /**
+ * FindAuthOriginById returns a single AuthOrigin model by its id.
+ */
+ findAuthOriginById(id: string): (AuthOrigin)
+ /**
+ * FindAuthOriginByRecordAndFingerprint returns a single AuthOrigin model
+ * by its authRecord relation and fingerprint.
+ */
+ findAuthOriginByRecordAndFingerprint(authRecord: Record, fingerprint: string): (AuthOrigin)
+ /**
+ * DeleteAllAuthOriginsByRecord deletes all AuthOrigin models associated with the provided record.
+ *
+ * Returns a combined error with the failed deletes.
+ */
+ deleteAllAuthOriginsByRecord(authRecord: Record): void
+ /**
+ * RecordQuery returns a new Record select query from a collection model, id or name.
+ *
+ * In case a collection id or name is provided and that collection doesn't
+ * actually exist, the generated query will be created with a cancelled context
+ * and will fail once an executor (Row(), One(), All(), etc.) is called.
+ */
+ recordQuery(collectionModelOrIdentifier: any): (dbx.SelectQuery)
+ /**
+ * FindRecordById finds the Record model by its id.
+ */
+ findRecordById(collectionModelOrIdentifier: any, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[]): (Record)
+ /**
+ * FindRecordsByIds finds all records by the specified ids.
+ * If no records are found, returns an empty slice.
+ */
+ findRecordsByIds(collectionModelOrIdentifier: any, recordIds: Array, ...optFilters: ((q: dbx.SelectQuery) => void)[]): Array<(Record | undefined)>
+ /**
+ * FindAllRecords finds all records matching specified db expressions.
+ *
+ * Returns all collection records if no expression is provided.
+ *
+ * Returns an empty slice if no records are found.
+ *
+ * Example:
+ *
+ * ```
+ * // no extra expressions
+ * app.FindAllRecords("example")
+ *
+ * // with extra expressions
+ * expr1 := dbx.HashExp{"email": "test@example.com"}
+ * expr2 := dbx.NewExp("LOWER(username) = {:username}", dbx.Params{"username": "test"})
+ * app.FindAllRecords("example", expr1, expr2)
+ * ```
+ */
+ findAllRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): Array<(Record | undefined)>
+ /**
+ * FindFirstRecordByData returns the first found record matching
+ * the provided key-value pair.
+ */
+ findFirstRecordByData(collectionModelOrIdentifier: any, key: string, value: any): (Record)
+ /**
+ * FindRecordsByFilter returns limit number of records matching the
+ * provided string filter.
+ *
+ * NB! Use the last "params" argument to bind untrusted user variables!
+ *
+ * The filter argument is optional and can be empty string to target
+ * all available records.
+ *
+ * The sort argument is optional and can be empty string OR the same format
+ * used in the web APIs, ex. "-created,title".
+ *
+ * If the limit argument is <= 0, no limit is applied to the query and
+ * all matching records are returned.
+ *
+ * Returns an empty slice if no records are found.
+ *
+ * Example:
+ *
+ * ```
+ * app.FindRecordsByFilter(
+ * "posts",
+ * "title ~ {:title} && visible = {:visible}",
+ * "-created",
+ * 10,
+ * 0,
+ * dbx.Params{"title": "lorem ipsum", "visible": true}
+ * )
+ * ```
+ */
+ findRecordsByFilter(collectionModelOrIdentifier: any, filter: string, sort: string, limit: number, offset: number, ...params: dbx.Params[]): Array<(Record | undefined)>
+ /**
+ * FindFirstRecordByFilter returns the first available record matching the provided filter (if any).
+ *
+ * NB! Use the last params argument to bind untrusted user variables!
+ *
+ * Returns sql.ErrNoRows if no record is found.
+ *
+ * Example:
+ *
+ * ```
+ * app.FindFirstRecordByFilter("posts", "")
+ * app.FindFirstRecordByFilter("posts", "slug={:slug} && status='public'", dbx.Params{"slug": "test"})
+ * ```
+ */
+ findFirstRecordByFilter(collectionModelOrIdentifier: any, filter: string, ...params: dbx.Params[]): (Record)
+ /**
+ * CountRecords returns the total number of records in a collection.
+ */
+ countRecords(collectionModelOrIdentifier: any, ...exprs: dbx.Expression[]): number
+ /**
+ * FindAuthRecordByToken finds the auth record associated with the provided JWT
+ * (auth, file, verifyEmail, changeEmail, passwordReset types).
+ *
+ * Optionally specify a list of validTypes to check tokens only from those types.
+ *
+ * Returns an error if the JWT is invalid, expired or not associated to an auth collection record.
+ */
+ findAuthRecordByToken(token: string, ...validTypes: string[]): (Record)
+ /**
+ * FindAuthRecordByEmail finds the auth record associated with the provided email.
+ *
+ * Returns an error if it is not an auth collection or the record is not found.
+ */
+ findAuthRecordByEmail(collectionModelOrIdentifier: any, email: string): (Record)
+ /**
+ * CanAccessRecord checks if a record is allowed to be accessed by the
+ * specified requestInfo and accessRule.
+ *
+ * Rule and db checks are ignored in case requestInfo.Auth is a superuser.
+ *
+ * The returned error indicates that something unexpected happened during
+ * the check (eg. invalid rule or db query error).
+ *
+ * The method always returns false on invalid rule or db query error.
+ *
+ * Example:
+ *
+ * ```
+ * requestInfo, _ := e.RequestInfo()
+ * record, _ := app.FindRecordById("example", "RECORD_ID")
+ * rule := types.Pointer("@request.auth.id != '' || status = 'public'")
+ * // ... or use one of the record collection's rule, eg. record.Collection().ViewRule
+ *
+ * if ok, _ := app.CanAccessRecord(record, requestInfo, rule); ok { ... }
+ * ```
+ */
+ canAccessRecord(record: Record, requestInfo: RequestInfo, accessRule: string): boolean
+ /**
+ * ExpandRecord expands the relations of a single Record model.
+ *
+ * If optFetchFunc is not set, then a default function will be used
+ * that returns all relation records.
+ *
+ * Returns a map with the failed expand parameters and their errors.
+ */
+ expandRecord(record: Record, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ /**
+ * ExpandRecords expands the relations of the provided Record models list.
+ *
+ * If optFetchFunc is not set, then a default function will be used
+ * that returns all relation records.
+ *
+ * Returns a map with the failed expand parameters and their errors.
+ */
+ expandRecords(records: Array<(Record | undefined)>, expands: Array, optFetchFunc: ExpandFetchFunc): _TygojaDict
+ /**
+ * OnBootstrap hook is triggered when initializing the main application
+ * resources (db, app settings, etc).
+ */
+ onBootstrap(): (hook.Hook)
+ /**
+ * OnServe hook is triggered when the app web server is started
+ * (after starting the TCP listener but before initializing the blocking serve task),
+ * allowing you to adjust its options and attach new routes or middlewares.
+ */
+ onServe(): (hook.Hook)
+ /**
+ * OnTerminate hook is triggered when the app is in the process
+ * of being terminated (ex. on SIGTERM signal).
+ *
+ * Note that the app could be terminated abruptly without awaiting the hook completion.
+ */
+ onTerminate(): (hook.Hook)
+ /**
+ * OnBackupCreate hook is triggered on each [App.CreateBackup] call.
+ */
+ onBackupCreate(): (hook.Hook)
+ /**
+ * OnBackupRestore hook is triggered before app backup restore (aka. [App.RestoreBackup] call).
+ *
+ * Note that by default on success the application is restarted and the after state of the hook is ignored.
+ */
+ onBackupRestore(): (hook.Hook)
+ /**
+ * OnModelValidate is triggered every time when a model is being validated
+ * (e.g. triggered by App.Validate() or App.Save()).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelValidate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelCreate is triggered every time when a new model is being created
+ * (e.g. triggered by App.Save()).
+ *
+ * Operations BEFORE the e.Next() execute before the model validation
+ * and the INSERT DB statement.
+ *
+ * Operations AFTER the e.Next() execute after the model validation
+ * and the INSERT DB statement.
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events, you can
+ * bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelCreate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelCreateExecute is triggered after successful Model validation
+ * and right before the model INSERT DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Save() in the following firing order:
+ * OnModelCreate {
+ * ```
+ *  -> OnModelValidate (skipped with App.SaveNoValidate())
+ *  -> OnModelCreateExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may not
+ * have been committed yet.
+ * If you want to listen to only the actual persisted events,
+ * you can bind to [OnModelAfterCreateSuccess] or [OnModelAfterCreateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelCreateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterCreateSuccess is triggered after each successful
+ * Model DB create persistence.
+ *
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is delayed and executed only AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model wasn't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterCreateError is triggered after each failed
+ * Model DB create persistence.
+ *
+ * Note that the execution of this hook is either immediate or delayed
+ * depending on the error:
+ * ```
+ * - "immediate" on App.Save() failure
+ * - "delayed" on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelUpdate is triggered every time when a model is being updated
+ * (e.g. triggered by App.Save()).
+ *
+ * Operations BEFORE the e.Next() execute before the model validation
+ * and the UPDATE DB statement.
+ *
+ * Operations AFTER the e.Next() execute after the model validation
+ * and the UPDATE DB statement.
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted events, you can
+ * bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelUpdate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelUpdateExecute is triggered after successful Model validation
+ * and right before the model UPDATE DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Save() in the following firing order:
+ * OnModelUpdate {
+ * ```
+ *  -> OnModelValidate (skipped with App.SaveNoValidate())
+ *  -> OnModelUpdateExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is persisted in the database since its wrapping transaction may not
+ * have been committed yet.
+ * If you want to listen to only the actual persisted events,
+ * you can bind to [OnModelAfterUpdateSuccess] or [OnModelAfterUpdateError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterUpdateSuccess is triggered after each successful
+ * Model DB update persistence.
+ *
+ * Note that when a Model is persisted as part of a transaction,
+ * this hook is delayed and executed only AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model changes weren't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterUpdateError is triggered after each failed
+ * Model DB update persistence.
+ *
+ * Note that the execution of this hook is either immediate or delayed
+ * depending on the error:
+ * ```
+ * - "immediate" on App.Save() failure
+ * - "delayed" on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelDelete is triggered every time when a model is being deleted
+ * (e.g. triggered by App.Delete()).
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is deleted from the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted deleted events, you can
+ * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelDelete(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelDeleteExecute is triggered right before the model
+ * DELETE DB statement execution.
+ *
+ * Usually it is triggered as part of the App.Delete() in the following firing order:
+ * OnModelDelete {
+ * ```
+ *  -> (internal delete checks)
+ *  -> OnModelDeleteExecute
+ * ```
+ * }
+ *
+ * Note that successful execution doesn't guarantee that the model
+ * is deleted from the database since its wrapping transaction may
+ * not have been committed yet.
+ * If you want to listen to only the actual persisted deleted events, you can
+ * bind to [OnModelAfterDeleteSuccess] or [OnModelAfterDeleteError] hooks.
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterDeleteSuccess is triggered after each successful
+ * Model DB delete persistence.
+ *
+ * Note that when a Model is deleted as part of a transaction,
+ * this hook is delayed and executed only AFTER the transaction has been committed.
+ * This hook is NOT triggered in case the transaction rollbacks
+ * (aka. when the model delete wasn't persisted).
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnModelAfterDeleteError is triggered after each failed
+ * Model DB delete persistence.
+ *
+ * Note that the execution of this hook is either immediate or delayed
+ * depending on the error:
+ * ```
+ * - "immediate" on App.Delete() failure
+ * - "delayed" on transaction rollback
+ * ```
+ *
+ * For convenience, if you want to listen to only the Record models
+ * events without doing manual type assertion, you can attach to the OnRecord* proxy hooks.
+ *
+ * If the optional "tags" list (Collection id/name, Model table name, etc.) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onModelAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordEnrich is triggered every time when a record is enriched
+ * (as part of the builtin Record responses, during realtime message serialization, or when [apis.EnrichRecord] is invoked).
+ *
+ * It could be used for example to redact/hide or add computed temporary
+ * Record model props only for the specific request info. For example:
+ *
+ * app.OnRecordEnrich("posts").BindFunc(func(e *core.RecordEnrichEvent) {
+ * ```
+ *  // hide one or more fields
+ *  e.Record.Hide("role")
+ *
+ *  // add new custom field for registered users
+ *  if e.RequestInfo.Auth != nil && e.RequestInfo.Auth.Collection().Name == "users" {
+ *    e.Record.WithCustomData(true) // for security requires explicitly allowing it
+ *    e.Record.Set("computedScore", e.Record.GetInt("score") * e.RequestInfo.Auth.GetInt("baseScore"))
+ *  }
+ *
+ *  return e.Next()
+ * ```
+ * })
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordEnrich(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordValidate is a Record proxy model hook of [OnModelValidate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordValidate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordCreate is a Record proxy model hook of [OnModelCreate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordCreate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordCreateExecute is a Record proxy model hook of [OnModelCreateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordCreateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterCreateSuccess is a Record proxy model hook of [OnModelAfterCreateSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterCreateError is a Record proxy model hook of [OnModelAfterCreateError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordUpdate is a Record proxy model hook of [OnModelUpdate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordUpdate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordUpdateExecute is a Record proxy model hook of [OnModelUpdateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterUpdateSuccess is a Record proxy model hook of [OnModelAfterUpdateSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterUpdateError is a Record proxy model hook of [OnModelAfterUpdateError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordDelete is a Record proxy model hook of [OnModelDelete].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordDelete(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordDeleteExecute is a Record proxy model hook of [OnModelDeleteExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterDeleteSuccess is a Record proxy model hook of [OnModelAfterDeleteSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAfterDeleteError is a Record proxy model hook of [OnModelAfterDeleteError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionValidate is a Collection proxy model hook of [OnModelValidate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionValidate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionCreate is a Collection proxy model hook of [OnModelCreate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionCreate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionCreateExecute is a Collection proxy model hook of [OnModelCreateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionCreateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterCreateSuccess is a Collection proxy model hook of [OnModelAfterCreateSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterCreateError is a Collection proxy model hook of [OnModelAfterCreateError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionUpdate is a Collection proxy model hook of [OnModelUpdate].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionUpdate(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionUpdateExecute is a Collection proxy model hook of [OnModelUpdateExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterUpdateSuccess is a Collection proxy model hook of [OnModelAfterUpdateSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterUpdateError is a Collection proxy model hook of [OnModelAfterUpdateError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionDelete is a Collection proxy model hook of [OnModelDelete].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionDelete(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionDeleteExecute is a Collection proxy model hook of [OnModelDeleteExecute].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterDeleteSuccess is a Collection proxy model hook of [OnModelAfterDeleteSuccess].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionAfterDeleteError is a Collection proxy model hook of [OnModelAfterDeleteError].
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onCollectionAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerSend hook is triggered every time when a new email is
+ * being sent using the [App.NewMailClient()] instance.
+ *
+ * It allows intercepting the email message or to use a custom mailer client.
+ */
+ onMailerSend(): (hook.Hook)
+ /**
+ * OnMailerRecordAuthAlertSend hook is triggered when
+ * sending a new device login auth alert email, allowing you to
+ * intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordAuthAlertSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordPasswordResetSend hook is triggered when
+ * sending a password reset email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordPasswordResetSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordVerificationSend hook is triggered when
+ * sending a verification email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordVerificationSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordEmailChangeSend hook is triggered when sending a
+ * confirmation new address email to an auth record, allowing
+ * you to intercept and customize the email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordEmailChangeSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnMailerRecordOTPSend hook is triggered when sending an OTP email
+ * to an auth record, allowing you to intercept and customize the
+ * email message that is being sent.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onMailerRecordOTPSend(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRealtimeConnectRequest hook is triggered when establishing the SSE client connection.
+ *
+ * Any execution after e.Next() of a hook handler happens after the client disconnects.
+ */
+ onRealtimeConnectRequest(): (hook.Hook)
+ /**
+ * OnRealtimeMessageSend hook is triggered when sending an SSE message to a client.
+ */
+ onRealtimeMessageSend(): (hook.Hook)
+ /**
+ * OnRealtimeSubscribeRequest hook is triggered when updating the
+ * client subscriptions, allowing you to further validate and
+ * modify the submitted change.
+ */
+ onRealtimeSubscribeRequest(): (hook.Hook)
+ /**
+ * OnSettingsListRequest hook is triggered on each API Settings list request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ */
+ onSettingsListRequest(): (hook.Hook)
+ /**
+ * OnSettingsUpdateRequest hook is triggered on each API Settings update request.
+ *
+ * Could be used to additionally validate the request data or
+ * implement completely different persistence behavior.
+ */
+ onSettingsUpdateRequest(): (hook.Hook)
+ /**
+ * OnSettingsReload hook is triggered every time when the App.Settings()
+ * is being replaced with a new state.
+ *
+ * Calling App.Settings() after e.Next() returns the new state.
+ */
+ onSettingsReload(): (hook.Hook)
+ /**
+ * OnFileDownloadRequest hook is triggered before each API File download request.
+ *
+ * Could be used to validate or modify the file response before
+ * returning it to the client.
+ */
+ onFileDownloadRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnFileTokenRequest hook is triggered on each auth file token API request.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onFileTokenRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthRequest hook is triggered on each successful API
+ * record authentication request (sign-in, token refresh, etc.).
+ *
+ * Could be used to additionally validate or modify the authenticated
+ * record data and token.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthWithPasswordRequest hook is triggered on each
+ * Record auth with password API request.
+ *
+ * [RecordAuthWithPasswordRequestEvent.Record] could be nil if no matching identity is found, allowing
+ * you to manually locate a different Record model (by reassigning [RecordAuthWithPasswordRequestEvent.Record]).
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthWithPasswordRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthWithOAuth2Request hook is triggered on each Record
+ * OAuth2 sign-in/sign-up API request (after token exchange and before external provider linking).
+ *
+ * If [RecordAuthWithOAuth2RequestEvent.Record] is not set, then the OAuth2
+ * request will try to create a new auth Record.
+ *
+ * To assign or link a different existing record model you can
+ * change the [RecordAuthWithOAuth2RequestEvent.Record] field.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthWithOAuth2Request(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthRefreshRequest hook is triggered on each Record
+ * auth refresh API request (right before generating a new auth token).
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different auth refresh behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthRefreshRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordRequestPasswordResetRequest hook is triggered on
+ * each Record request password reset API request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different password reset behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordRequestPasswordResetRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordConfirmPasswordResetRequest hook is triggered on
+ * each Record confirm password reset API request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordConfirmPasswordResetRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordRequestVerificationRequest hook is triggered on
+ * each Record request verification API request.
+ *
+ * Could be used to additionally validate the loaded request data or implement
+ * completely different verification behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordRequestVerificationRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordConfirmVerificationRequest hook is triggered on each
+ * Record confirm verification API request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordConfirmVerificationRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordRequestEmailChangeRequest hook is triggered on each
+ * Record request email change API request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different request email change behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordRequestEmailChangeRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordConfirmEmailChangeRequest hook is triggered on each
+ * Record confirm email change API request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordConfirmEmailChangeRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordRequestOTPRequest hook is triggered on each Record
+ * request OTP API request.
+ *
+ * [RecordCreateOTPRequestEvent.Record] could be nil if no matching identity is found, allowing
+ * you to manually create or locate a different Record model (by reassigning [RecordCreateOTPRequestEvent.Record]).
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordRequestOTPRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordAuthWithOTPRequest hook is triggered on each Record
+ * auth with OTP API request.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordAuthWithOTPRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordsListRequest hook is triggered on each API Records list request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordsListRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordViewRequest hook is triggered on each API Record view request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordViewRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordCreateRequest hook is triggered on each API Record create request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordCreateRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordUpdateRequest hook is triggered on each API Record update request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordUpdateRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnRecordDeleteRequest hook is triggered on each API Record delete request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different delete behavior.
+ *
+ * If the optional "tags" list (Collection ids or names) is specified,
+ * then all event handlers registered via the created hook will be
+ * triggered and called only if their event data origin matches the tags.
+ */
+ onRecordDeleteRequest(...tags: string[]): (hook.TaggedHook)
+ /**
+ * OnCollectionsListRequest hook is triggered on each API Collections list request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ */
+ onCollectionsListRequest(): (hook.Hook)
+ /**
+ * OnCollectionViewRequest hook is triggered on each API Collection view request.
+ *
+ * Could be used to validate or modify the response before returning it to the client.
+ */
+ onCollectionViewRequest(): (hook.Hook)
+ /**
+ * OnCollectionCreateRequest hook is triggered on each API Collection create request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ */
+ onCollectionCreateRequest(): (hook.Hook)
+ /**
+ * OnCollectionUpdateRequest hook is triggered on each API Collection update request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different persistence behavior.
+ */
+ onCollectionUpdateRequest(): (hook.Hook)
+ /**
+ * OnCollectionDeleteRequest hook is triggered on each API Collection delete request.
+ *
+ * Could be used to additionally validate the request data or implement
+ * completely different delete behavior.
+ */
+ onCollectionDeleteRequest(): (hook.Hook)
+ /**
+	 * OnCollectionsImportRequest hook is triggered on each API
+ * collections import request.
+ *
+ * Could be used to additionally validate the imported collections or
+ * to implement completely different import behavior.
+ */
+ onCollectionsImportRequest(): (hook.Hook)
+ /**
+ * OnBatchRequest hook is triggered on each API batch request.
+ *
+ * Could be used to additionally validate or modify the submitted batch requests.
+ */
+ onBatchRequest(): (hook.Hook)
+ }
+ // @ts-ignore
+ import validation = ozzo_validation
+ /**
+ * AuthOrigin defines a Record proxy for working with the authOrigins collection.
+ */
+ type _skGzCCS = Record
+ interface AuthOrigin extends _skGzCCS {
+ }
+ interface newAuthOrigin {
+ /**
+ * NewAuthOrigin instantiates and returns a new blank *AuthOrigin model.
+ *
+ * Example usage:
+ *
+ * ```
+	 * origin := core.NewAuthOrigin(app)
+ * origin.SetRecordRef(user.Id)
+ * origin.SetCollectionRef(user.Collection().Id)
+ * origin.SetFingerprint("...")
+ * app.Save(origin)
+ * ```
+ */
+ (app: App): (AuthOrigin)
+ }
+ interface AuthOrigin {
+ /**
+ * PreValidate implements the [PreValidator] interface and checks
+ * whether the proxy is properly loaded.
+ */
+ preValidate(ctx: context.Context, app: App): void
+ }
+ interface AuthOrigin {
+ /**
+ * ProxyRecord returns the proxied Record model.
+ */
+ proxyRecord(): (Record)
+ }
+ interface AuthOrigin {
+ /**
+ * SetProxyRecord loads the specified record model into the current proxy.
+ */
+ setProxyRecord(record: Record): void
+ }
+ interface AuthOrigin {
+ /**
+ * CollectionRef returns the "collectionRef" field value.
+ */
+ collectionRef(): string
+ }
+ interface AuthOrigin {
+ /**
+ * SetCollectionRef updates the "collectionRef" record field value.
+ */
+ setCollectionRef(collectionId: string): void
+ }
+ interface AuthOrigin {
+ /**
+ * RecordRef returns the "recordRef" record field value.
+ */
+ recordRef(): string
+ }
+ interface AuthOrigin {
+ /**
+ * SetRecordRef updates the "recordRef" record field value.
+ */
+ setRecordRef(recordId: string): void
+ }
+ interface AuthOrigin {
+ /**
+ * Fingerprint returns the "fingerprint" record field value.
+ */
+ fingerprint(): string
+ }
+ interface AuthOrigin {
+ /**
+ * SetFingerprint updates the "fingerprint" record field value.
+ */
+ setFingerprint(fingerprint: string): void
+ }
+ interface AuthOrigin {
+ /**
+ * Created returns the "created" record field value.
+ */
+ created(): types.DateTime
+ }
+ interface AuthOrigin {
+ /**
+ * Updated returns the "updated" record field value.
+ */
+ updated(): types.DateTime
+ }
+ interface BaseApp {
+ /**
+ * FindAllAuthOriginsByRecord returns all AuthOrigin models linked to the provided auth record (in DESC order).
+ */
+ findAllAuthOriginsByRecord(authRecord: Record): Array<(AuthOrigin | undefined)>
+ }
+ interface BaseApp {
+ /**
+ * FindAllAuthOriginsByCollection returns all AuthOrigin models linked to the provided collection (in DESC order).
+ */
+ findAllAuthOriginsByCollection(collection: Collection): Array<(AuthOrigin | undefined)>
+ }
+ interface BaseApp {
+ /**
+ * FindAuthOriginById returns a single AuthOrigin model by its id.
+ */
+ findAuthOriginById(id: string): (AuthOrigin)
+ }
+ interface BaseApp {
+ /**
+ * FindAuthOriginByRecordAndFingerprint returns a single AuthOrigin model
+ * by its authRecord relation and fingerprint.
+ */
+ findAuthOriginByRecordAndFingerprint(authRecord: Record, fingerprint: string): (AuthOrigin)
+ }
+ interface BaseApp {
+ /**
+ * DeleteAllAuthOriginsByRecord deletes all AuthOrigin models associated with the provided record.
+ *
+ * Returns a combined error with the failed deletes.
+ */
+ deleteAllAuthOriginsByRecord(authRecord: Record): void
+ }
+ /**
+ * FilesManager defines an interface with common methods that files manager models should implement.
+ */
+ interface FilesManager {
+ [key:string]: any;
+ /**
+ * BaseFilesPath returns the storage dir path used by the interface instance.
+ */
+ baseFilesPath(): string
+ }
+ /**
+ * DBConnectFunc defines a database connection initialization function.
+ */
+ interface DBConnectFunc {(dbPath: string): (dbx.DB) }
+ /**
+ * BaseAppConfig defines a BaseApp configuration option
+ */
+ interface BaseAppConfig {
+ dbConnect: DBConnectFunc
+ dataDir: string
+ encryptionEnv: string
+ queryTimeout: time.Duration
+ dataMaxOpenConns: number
+ dataMaxIdleConns: number
+ auxMaxOpenConns: number
+ auxMaxIdleConns: number
+ isDev: boolean
+ }
+ /**
+ * BaseApp implements CoreApp and defines the base PocketBase app structure.
+ */
+ interface BaseApp {
+ }
+ interface newBaseApp {
+ /**
+ * NewBaseApp creates and returns a new BaseApp instance
+ * configured with the provided arguments.
+ *
+ * To initialize the app, you need to call `app.Bootstrap()`.
+ */
+ (config: BaseAppConfig): (BaseApp)
+ }
+ interface BaseApp {
+ /**
+ * UnsafeWithoutHooks returns a shallow copy of the current app WITHOUT any registered hooks.
+ *
+ * NB! Note that using the returned app instance may cause data integrity errors
+ * since the Record validations and data normalizations (including files uploads)
+ * rely on the app hooks to work.
+ */
+ unsafeWithoutHooks(): App
+ }
+ interface BaseApp {
+ /**
+ * Logger returns the default app logger.
+ *
+ * If the application is not bootstrapped yet, fallbacks to slog.Default().
+ */
+ logger(): (slog.Logger)
+ }
+ interface BaseApp {
+ /**
+ * TxInfo returns the transaction associated with the current app instance (if any).
+ *
+ * Could be used if you want to execute indirectly a function after
+ * the related app transaction completes using `app.TxInfo().OnAfterFunc(callback)`.
+ */
+ txInfo(): (TxAppInfo)
+ }
+ interface BaseApp {
+ /**
+ * IsTransactional checks if the current app instance is part of a transaction.
+ */
+ isTransactional(): boolean
+ }
+ interface BaseApp {
+ /**
+ * IsBootstrapped checks if the application was initialized
+ * (aka. whether Bootstrap() was called).
+ */
+ isBootstrapped(): boolean
+ }
+ interface BaseApp {
+ /**
+ * Bootstrap initializes the application
+ * (aka. create data dir, open db connections, load settings, etc.).
+ *
+ * It will call ResetBootstrapState() if the application was already bootstrapped.
+ */
+ bootstrap(): void
+ }
+ interface closer {
+ [key:string]: any;
+ close(): void
+ }
+ interface BaseApp {
+ /**
+ * ResetBootstrapState releases the initialized core app resources
+ * (closing db connections, stopping cron ticker, etc.).
+ */
+ resetBootstrapState(): void
+ }
+ interface BaseApp {
+ /**
+ * DB returns the default app data.db builder instance.
+ *
+ * To minimize SQLITE_BUSY errors, it automatically routes the
+ * SELECT queries to the underlying concurrent db pool and everything
+ * else to the nonconcurrent one.
+ *
+	 * For finer control over the used connection pools you can
+ * call directly ConcurrentDB() or NonconcurrentDB().
+ */
+ db(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * ConcurrentDB returns the concurrent app data.db builder instance.
+ *
+ * This method is used mainly internally for executing db read
+ * operations in a concurrent/non-blocking manner.
+ *
+ * Most users should use simply DB() as it will automatically
+ * route the query execution to ConcurrentDB() or NonconcurrentDB().
+ *
+ * In a transaction the ConcurrentDB() and NonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ concurrentDB(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * NonconcurrentDB returns the nonconcurrent app data.db builder instance.
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other queries queue up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * Most users should use simply DB() as it will automatically
+ * route the query execution to ConcurrentDB() or NonconcurrentDB().
+ *
+ * In a transaction the ConcurrentDB() and NonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ nonconcurrentDB(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * AuxDB returns the app auxiliary.db builder instance.
+ *
+ * To minimize SQLITE_BUSY errors, it automatically routes the
+ * SELECT queries to the underlying concurrent db pool and everything
+ * else to the nonconcurrent one.
+ *
+	 * For finer control over the used connection pools you can
+ * call directly AuxConcurrentDB() or AuxNonconcurrentDB().
+ */
+ auxDB(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * AuxConcurrentDB returns the concurrent app auxiliary.db builder instance.
+ *
+ * This method is used mainly internally for executing db read
+ * operations in a concurrent/non-blocking manner.
+ *
+ * Most users should use simply AuxDB() as it will automatically
+ * route the query execution to AuxConcurrentDB() or AuxNonconcurrentDB().
+ *
+ * In a transaction the AuxConcurrentDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ auxConcurrentDB(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * AuxNonconcurrentDB returns the nonconcurrent app auxiliary.db builder instance.
+ *
+ * The returned db instance is limited only to a single open connection,
+ * meaning that it can process only 1 db operation at a time (other queries queue up).
+ *
+ * This method is used mainly internally and in the tests to execute write
+ * (save/delete) db operations as it helps with minimizing the SQLITE_BUSY errors.
+ *
+ * Most users should use simply AuxDB() as it will automatically
+ * route the query execution to AuxConcurrentDB() or AuxNonconcurrentDB().
+ *
+ * In a transaction the AuxConcurrentDB() and AuxNonconcurrentDB() refer to the same *dbx.TX instance.
+ */
+ auxNonconcurrentDB(): dbx.Builder
+ }
+ interface BaseApp {
+ /**
+ * DataDir returns the app data directory path.
+ */
+ dataDir(): string
+ }
+ interface BaseApp {
+ /**
+ * EncryptionEnv returns the name of the app secret env key
+ * (currently used primarily for optional settings encryption but this may change in the future).
+ */
+ encryptionEnv(): string
+ }
+ interface BaseApp {
+ /**
+ * IsDev returns whether the app is in dev mode.
+ *
+ * When enabled logs, executed sql statements, etc. are printed to the stderr.
+ */
+ isDev(): boolean
+ }
+ interface BaseApp {
+ /**
+ * Settings returns the loaded app settings.
+ */
+ settings(): (Settings)
+ }
+ interface BaseApp {
+ /**
+ * Store returns the app runtime store.
+ */
+ store(): (store.Store)
+ }
+ interface BaseApp {
+ /**
+ * Cron returns the app cron instance.
+ */
+ cron(): (cron.Cron)
+ }
+ interface BaseApp {
+ /**
+ * SubscriptionsBroker returns the app realtime subscriptions broker instance.
+ */
+ subscriptionsBroker(): (subscriptions.Broker)
+ }
+ interface BaseApp {
+ /**
+ * NewMailClient creates and returns a new SMTP or Sendmail client
+ * based on the current app settings.
+ */
+ newMailClient(): mailer.Mailer
+ }
+ interface BaseApp {
+ /**
+ * NewFilesystem creates a new local or S3 filesystem instance
+ * for managing regular app files (ex. record uploads)
+ * based on the current app settings.
+ *
+ * NB! Make sure to call Close() on the returned result
+ * after you are done working with it.
+ */
+ newFilesystem(): (filesystem.System)
+ }
+ interface BaseApp {
+ /**
+ * NewBackupsFilesystem creates a new local or S3 filesystem instance
+ * for managing app backups based on the current app settings.
+ *
+ * NB! Make sure to call Close() on the returned result
+ * after you are done working with it.
+ */
+ newBackupsFilesystem(): (filesystem.System)
+ }
+ interface BaseApp {
+ /**
+ * Restart restarts (aka. replaces) the current running application process.
+ *
+ * NB! It relies on execve which is supported only on UNIX based systems.
+ */
+ restart(): void
+ }
+ interface BaseApp {
+ /**
+ * RunSystemMigrations applies all new migrations registered in the [core.SystemMigrations] list.
+ */
+ runSystemMigrations(): void
+ }
+ interface BaseApp {
+ /**
+ * RunAppMigrations applies all new migrations registered in the [CoreAppMigrations] list.
+ */
+ runAppMigrations(): void
+ }
+ interface BaseApp {
+ /**
+ * RunAllMigrations applies all system and app migrations
+ * (aka. from both [core.SystemMigrations] and [CoreAppMigrations]).
+ */
+ runAllMigrations(): void
+ }
+ interface BaseApp {
+ onBootstrap(): (hook.Hook)
+ }
+ interface BaseApp {
+ onServe(): (hook.Hook)
+ }
+ interface BaseApp {
+ onTerminate(): (hook.Hook)
+ }
+ interface BaseApp {
+ onBackupCreate(): (hook.Hook)
+ }
+ interface BaseApp {
+ onBackupRestore(): (hook.Hook)
+ }
+ interface BaseApp {
+ onModelCreate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelCreateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelUpdate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelValidate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelDelete(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onModelAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordEnrich(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordValidate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordCreate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordCreateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordUpdate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordDelete(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionValidate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionCreate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionCreateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterCreateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterCreateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionUpdate(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionUpdateExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterUpdateSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterUpdateError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionDelete(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionDeleteExecute(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterDeleteSuccess(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionAfterDeleteError(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onMailerSend(): (hook.Hook)
+ }
+ interface BaseApp {
+ onMailerRecordPasswordResetSend(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onMailerRecordVerificationSend(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onMailerRecordEmailChangeSend(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onMailerRecordOTPSend(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onMailerRecordAuthAlertSend(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRealtimeConnectRequest(): (hook.Hook)
+ }
+ interface BaseApp {
+ onRealtimeMessageSend(): (hook.Hook)
+ }
+ interface BaseApp {
+ onRealtimeSubscribeRequest(): (hook.Hook)
+ }
+ interface BaseApp {
+ onSettingsListRequest(): (hook.Hook)
+ }
+ interface BaseApp {
+ onSettingsUpdateRequest(): (hook.Hook)
+ }
+ interface BaseApp {
+ onSettingsReload(): (hook.Hook)
+ }
+ interface BaseApp {
+ onFileDownloadRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onFileTokenRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAuthRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAuthWithPasswordRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAuthWithOAuth2Request(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAuthRefreshRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordRequestPasswordResetRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordConfirmPasswordResetRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordRequestVerificationRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordConfirmVerificationRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordRequestEmailChangeRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordConfirmEmailChangeRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordRequestOTPRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordAuthWithOTPRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordsListRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordViewRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordCreateRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordUpdateRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onRecordDeleteRequest(...tags: string[]): (hook.TaggedHook)
+ }
+ interface BaseApp {
+ onCollectionsListRequest(): (hook.Hook