diff --git a/example/convex/_generated/api.d.ts b/example/convex/_generated/api.d.ts
index 3753873..51b26e9 100644
--- a/example/convex/_generated/api.d.ts
+++ b/example/convex/_generated/api.d.ts
@@ -8,6 +8,7 @@
  * @module
  */
 
+import type * as batchedWrites from "../batchedWrites.js";
 import type * as btree from "../btree.js";
 import type * as crons from "../crons.js";
 import type * as leaderboard from "../leaderboard.js";
@@ -23,6 +24,7 @@ import type {
 } from "convex/server";
 
 declare const fullApi: ApiFromModules<{
+  batchedWrites: typeof batchedWrites;
   btree: typeof btree;
   crons: typeof crons;
   leaderboard: typeof leaderboard;
@@ -65,5 +67,6 @@ export declare const components: {
   photos: import("@convex-dev/aggregate/_generated/component.js").ComponentApi<"photos">;
   stats: import("@convex-dev/aggregate/_generated/component.js").ComponentApi<"stats">;
   btreeAggregate: import("@convex-dev/aggregate/_generated/component.js").ComponentApi<"btreeAggregate">;
+  batchedWrites: import("@convex-dev/aggregate/_generated/component.js").ComponentApi<"batchedWrites">;
   migrations: import("@convex-dev/migrations/_generated/component.js").ComponentApi<"migrations">;
 };
diff --git a/example/convex/batchedWrites.ts b/example/convex/batchedWrites.ts
new file mode 100644
index 0000000..32c2cdb
--- /dev/null
+++ b/example/convex/batchedWrites.ts
@@ -0,0 +1,321 @@
+/**
+ * Example of using the batch API for efficient writes.
+ *
+ * This demonstrates how to use the `.buffer()` method to queue write
+ * operations and flush them in a batch, reducing the number of mutations
+ * and improving performance.
+ */
+
+import { DirectAggregate } from "@convex-dev/aggregate";
+import { mutation } from "./_generated/server";
+import { components } from "./_generated/api.js";
+import { v } from "convex/values";
+import { customMutation } from "convex-helpers/server/customFunctions";
+
+const aggregate = new DirectAggregate<{
+  Key: number;
+  Id: string;
+}>(components.batchedWrites);
+
+/**
+ * Basic example: Enable buffering, queue operations, then flush manually.
+ */
+export const basicBatchedWrites = mutation({
+  args: {
+    count: v.number(),
+  },
+  handler: async (ctx, { count }) => {
+    // Enable buffering mode - modifies the aggregate instance in place
+    aggregate.buffer(true);
+
+    // Queue multiple insert operations
+    for (let i = 0; i < count; i++) {
+      await aggregate.insert(ctx, {
+        key: i,
+        id: `item-${i}`,
+        sumValue: i * 10,
+      });
+    }
+
+    // Disable buffering now that all operations are queued
+    aggregate.buffer(false);
+    // Flush all buffered operations in a single batch
+    await aggregate.flush(ctx);
+
+    // Read operations work normally (and auto-flush if needed)
+    const total = await aggregate.count(ctx);
+
+    return { inserted: count, total };
+  },
+});
+
+/**
+ * Advanced example: Use custom functions with an onSuccess callback.
+ *
+ * This pattern is useful when you are also using triggers, so that the
+ * triggered writes don't each call the component individually.
+ */
+
+// Create a custom mutation that uses the buffered aggregate
+const mutationWithBuffering = customMutation(mutation, {
+  args: {},
+  input: async () => {
+    aggregate.buffer(true);
+    return {
+      ctx: {},
+      args: {},
+      onSuccess: async ({ ctx }) => {
+        await aggregate.flush(ctx);
+      },
+    };
+  },
+});
+
+/**
+ * Example using a custom function with an onSuccess callback.
+ *
+ * This demonstrates the recommended pattern for batching:
+ * - Enable buffering at the start (in the custom mutation's `input`)
+ * - Queue operations throughout the function using the global aggregate
+ * - Flush in the onSuccess callback
+ */
+export const batchedWritesWithOnSuccess = mutationWithBuffering({
+  args: {
+    items: v.array(
+      v.object({
+        key: v.number(),
+        id: v.string(),
+        value: v.number(),
+      }),
+    ),
+  },
+  handler: async (ctx, { items }) => {
+    // Queue all operations - they're stored in memory, not sent yet.
+    // We use the global 'aggregate' instance, which has buffering enabled.
+    for (const item of items) {
+      await aggregate.insert(ctx, {
+        key: item.key,
+        id: item.id,
+        sumValue: item.value,
+      });
+    }
+
+    return {
+      queued: items.length,
+    };
+  },
+});
+
+/**
+ * Complex example: Mix different operation types in a batch.
+ */
+export const complexBatchedOperations = mutation({
+  args: {
+    inserts: v.array(
+      v.object({
+        key: v.number(),
+        id: v.string(),
+        value: v.number(),
+      }),
+    ),
+    deletes: v.array(
+      v.object({
+        key: v.number(),
+        id: v.string(),
+      }),
+    ),
+    updates: v.array(
+      v.object({
+        oldKey: v.number(),
+        newKey: v.number(),
+        id: v.string(),
+        value: v.number(),
+      }),
+    ),
+  },
+  handler: async (ctx, { inserts, deletes, updates }) => {
+    // Enable buffering
+    aggregate.buffer(true);
+
+    // Queue inserts
+    for (const item of inserts) {
+      await aggregate.insert(ctx, {
+        key: item.key,
+        id: item.id,
+        sumValue: item.value,
+      });
+    }
+
+    // Queue deletes
+    for (const item of deletes) {
+      await aggregate.deleteIfExists(ctx, {
+        key: item.key,
+        id: item.id,
+      });
+    }
+
+    // Queue updates (replace operations)
+    for (const item of updates) {
+      await aggregate.replaceOrInsert(
+        ctx,
+        { key: item.oldKey, id: item.id },
+        { key: item.newKey, sumValue: item.value },
+      );
+    }
+
+    // Flush all operations at once
+    await aggregate.flush(ctx);
+
+    // Disable buffering
+    aggregate.buffer(false);
+
+    return {
+      operations: {
+        inserts: inserts.length,
+        deletes: deletes.length,
+        updates: updates.length,
+      },
+    };
+  },
+});
+
+/**
+ * Performance comparison: Batched vs. unbatched writes.
+ */
+export const comparePerformance = mutation({
+  args: {
+    count: v.number(),
+    useBatching: v.boolean(),
+  },
+  handler: async (ctx, { count, useBatching }) => {
+    const start = Date.now();
+
+    if (useBatching) {
+      // Batched approach
+      aggregate.buffer(true);
+
+      for (let i = 0; i < count; i++) {
+        await aggregate.insert(ctx, {
+          key: 1000000 + i,
+          id: `perf-test-${i}`,
+          sumValue: i,
+        });
+      }
+
+      await aggregate.flush(ctx);
+      aggregate.buffer(false);
+    } else {
+      // Unbatched approach
+      for (let i = 0; i < count; i++) {
+        await aggregate.insert(ctx, {
+          key: 1000000 + i,
+          id: `perf-test-${i}`,
+          sumValue: i,
+        });
+      }
+    }
+
+    const duration = Date.now() - start;
+
+    return {
+      method: useBatching ? "batched" : "unbatched",
+      count,
+      durationMs: duration,
+    };
+  },
+});
+
+/**
+ * Example showing automatic flush on read operations.
+ */
+export const autoFlushOnRead = mutation({
+  args: {
+    count: v.number(),
+  },
+  handler: async (ctx, { count }) => {
+    // Enable buffering
+    aggregate.buffer(true);
+
+    // Queue some operations
+    for (let i = 0; i < count; i++) {
+      await aggregate.insert(ctx, {
+        key: 2000000 + i,
+        id: `auto-flush-${i}`,
+        sumValue: i,
+      });
+    }
+
+    // This read operation automatically flushes the buffer first,
+    // so we'll see the correct count including the queued operations
+    const total = await aggregate.count(ctx, {
+      bounds: {
+        lower: { key: 2000000, inclusive: true },
+      },
+    });
+
+    // Disable buffering
+    aggregate.buffer(false);
+
+    return {
+      queued: count,
+      totalInRange: total,
+    };
+  },
+});
+
+/**
+ * Example: Batch operations with namespace grouping.
+ *
+ * When you have operations across multiple namespaces,
+ * the batch mutation automatically groups them and fetches
+ * each namespace's tree only once.
+ */
+export const batchedWritesWithNamespaces = mutation({
+  args: {
+    operations: v.array(
+      v.object({
+        namespace: v.string(),
+        key: v.number(),
+        id: v.string(),
+        value: v.number(),
+      }),
+    ),
+  },
+  handler: async (ctx, { operations }) => {
+    // Create a namespaced aggregate
+    const namespacedAggregate = new DirectAggregate<{
+      Key: number;
+      Id: string;
+      Namespace: string;
+    }>(components.batchedWrites);
+
+    // Enable buffering
+    namespacedAggregate.buffer(true);
+
+    // Queue operations - they'll be grouped by namespace internally
+    for (const op of operations) {
+      await namespacedAggregate.insert(ctx, {
+        namespace: op.namespace,
+        key: op.key,
+        id: op.id,
+        sumValue: op.value,
+      });
+    }
+
+    // Flush all operations.
+    // The batch mutation will group by namespace automatically.
+    await namespacedAggregate.flush(ctx);
+
+    // Disable buffering
+    namespacedAggregate.buffer(false);
+
+    // Count unique namespaces
+    const namespaces = new Set(operations.map((op) => op.namespace));
+
+    return {
+      operations: operations.length,
+      namespaces: namespaces.size,
+      message: `Processed ${operations.length} operations across ${namespaces.size} namespaces in a single batch`,
+    };
+  },
+});
diff --git a/example/convex/convex.config.ts b/example/convex/convex.config.ts
index e58e969..18fbf76 100644
--- a/example/convex/convex.config.ts
+++ b/example/convex/convex.config.ts
@@ -9,6 +9,7 @@
 app.use(aggregate, { name: "music" });
 app.use(aggregate, { name: "photos" });
 app.use(aggregate, { name: "stats" });
 app.use(aggregate, { name: "btreeAggregate" });
+app.use(aggregate, { name: "batchedWrites" });
 
 app.use(migrations);
diff --git a/src/client/buffer.test.ts b/src/client/buffer.test.ts
new file mode 100644
index 0000000..af30da4
--- /dev/null
+++ b/src/client/buffer.test.ts
@@ -0,0 +1,50 @@
+import { convexTest } from "convex-test";
+import { expect, test } from "vitest";
+import { DirectAggregate } from "./index.js";
+import { components, modules } from "./setup.test.js";
+import { defineSchema } from "convex/server";
+import { register } from "../test.js";
+
+const schema = defineSchema({});
+
+function setupTest() {
+  const t = convexTest(schema, modules);
+  register(t);
+  return t;
+}
+
+test("buffer flush in mutation context", async () => {
+  const t = setupTest();
+
+  const aggregate = new DirectAggregate<{
+    Key: number;
+    Id: string;
+  }>(components.aggregate);
+
+  // Test that reading with buffered operations in a mutation works (auto-flushes)
+  await t.run(async (ctx) => {
+    aggregate.buffer(true);
+    await aggregate.insert(ctx, { key: 1, id: "a" });
+    await aggregate.insert(ctx, { key: 2, id: "b" });
+
+    // This should work because we're in a mutation context and it auto-flushes
+    const count = await aggregate.count(ctx);
+    expect(count).toBe(2);
+
+    aggregate.buffer(false);
+  });
+
+  // Test manual flush
+  await t.run(async (ctx) => {
+    aggregate.buffer(true);
+    await aggregate.insert(ctx, { key: 3, id: "c" });
+
+    // Manual flush
+    await aggregate.flush(ctx);
+
+    aggregate.buffer(false);
+
+    const count = await aggregate.count(ctx);
+    expect(count).toBe(3);
+  });
+});
diff --git a/src/client/index.ts b/src/client/index.ts
index 71145d3..6aae6dc 100644
--- a/src/client/index.ts
+++ b/src/client/index.ts
@@ -36,6 +36,50 @@ export type Item = {
 
 export type { Key, Bound };
 
+type BufferedOperation =
+  | {
+      type: "insert";
+      key: any;
+      value: any;
+      summand?: number;
+      namespace?: any;
+    }
+  | {
+      type: "delete";
+      key: any;
+      namespace?: any;
+    }
+  | {
+      type: "replace";
+      currentKey: any;
+      newKey: any;
+      value: any;
+      summand?: number;
+      namespace?: any;
+      newNamespace?: any;
+    }
+  | {
+      type: "deleteIfExists";
+      key: any;
+      namespace?: any;
+    }
+  | {
+      type: "replaceOrInsert";
+      currentKey: any;
+      newKey: any;
+      value: any;
+      summand?: number;
+      namespace?: any;
+      newNamespace?: any;
+    }
+  | {
+      type: "insertIfDoesNotExist";
+      key: any;
+      value: any;
+      summand?: number;
+      namespace?: any;
+    };
+
 /**
  * Write data to be aggregated, and read aggregated data.
  *
@@ -54,8 +98,60 @@ export class Aggregate<
   ID extends string,
   Namespace extends ConvexValue | undefined = undefined,
 > {
+  private isBuffering = false;
+  private operationQueue: BufferedOperation[] = [];
+
   constructor(protected component: ComponentApi) {}
 
+  /**
+   * Enable or disable buffering mode. When buffering is enabled, write
+   * operations are queued and sent in a batch when flush() is called or when
+   * any read operation is performed.
+   *
+   * Modifies this instance in place and returns it for chaining.
+   *
+   * Example usage:
+   * ```ts
+   * aggregate.buffer(true);
+   * await aggregate.insert(ctx, { key: 1, id: "a" });
+   * await aggregate.insert(ctx, { key: 2, id: "b" });
+   * await aggregate.flush(ctx); // Send all buffered operations
+   * ```
+   */
+  buffer(enabled: boolean): this {
+    this.isBuffering = enabled;
+    return this;
+  }
+
+  /**
+   * Flush all buffered operations to the database.
+   * This sends all queued write operations in a single batch mutation.
+   * Called automatically before any read operation when buffering is enabled.
+   */
+  async flush(ctx: RunMutationCtx): Promise<void> {
+    if (this.operationQueue.length === 0) {
+      return;
+    }
+    const operations = this.operationQueue;
+    this.operationQueue = [];
+    await ctx.runMutation(this.component.public.batch, {
+      operations,
+    });
+  }
+
+  private async flushBeforeRead(ctx: RunQueryCtx | RunMutationCtx) {
+    if (this.isBuffering && this.operationQueue.length > 0) {
+      if (!("runMutation" in ctx)) {
+        throw new Error(
+          "Cannot read with buffered operations in a query context. " +
+            "Either call this from a mutation context, or call flush() before reading, " +
+            "or disable buffering with .buffer(false).",
+        );
+      }
+      await this.flush(ctx);
+    }
+  }
+
   /// Aggregate queries.
 
   /**
@@ -65,6 +161,7 @@ export class Aggregate<
     ctx: RunQueryCtx,
     ...opts: NamespacedOpts<{ bounds?: Bounds<K, ID> }, Namespace>
   ): Promise<number> {
+    await this.flushBeforeRead(ctx);
     const { count } = await ctx.runQuery(
       this.component.btree.aggregateBetween,
       {
@@ -82,6 +179,7 @@ export class Aggregate<
     ctx: RunQueryCtx,
     queries: NamespacedOptsBatch<{ bounds?: Bounds<K, ID> }, Namespace>,
   ): Promise<number[]> {
+    await this.flushBeforeRead(ctx);
     const queryArgs = queries.map((query) => {
       if (!query) {
         throw new Error("You must pass bounds and/or namespace");
@@ -106,6 +204,7 @@ export class Aggregate<
     ctx: RunQueryCtx,
     ...opts: NamespacedOpts<{ bounds?: Bounds<K, ID> }, Namespace>
   ): Promise<number> {
+    await this.flushBeforeRead(ctx);
     const { sum } = await ctx.runQuery(this.component.btree.aggregateBetween, {
       ...boundsToPositions(opts[0]?.bounds),
       namespace: namespaceFromOpts(opts),
@@ -120,6 +219,7 @@ export class Aggregate<
     ctx: RunQueryCtx,
     queries: NamespacedOptsBatch<{ bounds?: Bounds<K, ID> }, Namespace>,
   ): Promise<number[]> {
+    await this.flushBeforeRead(ctx);
    const queryArgs = queries.map((query) => {
       if (!query) {
         throw new Error("You must pass bounds and/or namespace");
@@ -150,6 +250,7 @@ export class Aggregate<
     offset: number,
     ...opts: NamespacedOpts<{ bounds?: Bounds<K, ID> }, Namespace>
   ): Promise<Item<K, ID>> {
+    await this.flushBeforeRead(ctx);
     if (offset < 0) {
       const item = await ctx.runQuery(this.component.btree.atNegativeOffset, {
         offset: -offset - 1,
@@ -175,6 +276,7 @@ export class Aggregate<
       Namespace
     >,
   ): Promise<Item<K, ID>[]> {
+    await this.flushBeforeRead(ctx);
     const queryArgs = queries.map((q) => ({
       offset: q.offset,
       ...boundsToPositions(q.bounds),
@@ -202,6 +304,7 @@ export class Aggregate<
       Namespace
     >
   ): Promise<number> {
+    await this.flushBeforeRead(ctx);
     const { k1, k2 } = boundsToPositions(opts[0]?.bounds);
     if (opts[0]?.order === "desc") {
       return await ctx.runQuery(this.component.btree.offsetUntil, {
@@ -305,6 +408,7 @@ export class Aggregate<
       Namespace
     >
   ): Promise<{ page: Item<K, ID>[]; cursor: string; isDone: boolean }> {
+    await this.flushBeforeRead(ctx);
     const order = opts[0]?.order ?? "asc";
     const pageSize = opts[0]?.pageSize ?? 100;
     const {
@@ -373,6 +477,16 @@ export class Aggregate<
     id: ID,
     summand?: number,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "insert",
+        key: keyToPosition(key, id),
+        value: id,
+        summand,
+        namespace,
+      });
+      return;
+    }
     await ctx.runMutation(this.component.public.insert, {
       key: keyToPosition(key, id),
       summand,
@@ -386,6 +500,14 @@ export class Aggregate<
     key: K,
     id: ID,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "delete",
+        key: keyToPosition(key, id),
+        namespace,
+      });
+      return;
+    }
     await ctx.runMutation(this.component.public.delete_, {
       key: keyToPosition(key, id),
       namespace,
@@ -400,6 +522,18 @@ export class Aggregate<
     id: ID,
     summand?: number,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "replace",
+        currentKey: keyToPosition(currentKey, id),
+        newKey: keyToPosition(newKey, id),
+        value: id,
+        summand,
+        namespace: currentNamespace,
+        newNamespace,
+      });
+      return;
+    }
     await ctx.runMutation(this.component.public.replace, {
       currentKey: keyToPosition(currentKey, id),
       newKey: keyToPosition(newKey, id),
@@ -416,6 +550,16 @@ export class Aggregate<
     id: ID,
     summand?: number,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "insertIfDoesNotExist",
+        key: keyToPosition(key, id),
+        value: id,
+        summand,
+        namespace,
+      });
+      return;
+    }
     await this._replaceOrInsert(
       ctx,
       namespace,
@@ -432,6 +576,14 @@ export class Aggregate<
     key: K,
     id: ID,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "deleteIfExists",
+        key: keyToPosition(key, id),
+        namespace,
+      });
+      return;
+    }
     await ctx.runMutation(this.component.public.deleteIfExists, {
       key: keyToPosition(key, id),
       namespace,
@@ -446,6 +598,18 @@ export class Aggregate<
     id: ID,
     summand?: number,
   ): Promise<void> {
+    if (this.isBuffering) {
+      this.operationQueue.push({
+        type: "replaceOrInsert",
+        currentKey: keyToPosition(currentKey, id),
+        newKey: keyToPosition(newKey, id),
+        value: id,
+        summand,
+        namespace: currentNamespace,
+        newNamespace,
+      });
+      return;
+    }
     await ctx.runMutation(this.component.public.replaceOrInsert, {
       currentKey: keyToPosition(currentKey, id),
       newKey: keyToPosition(newKey, id),
@@ -503,6 +667,7 @@ export class Aggregate<
     cursor?: string,
     pageSize: number = 100,
   ): Promise<{ page: Namespace[]; cursor: string; isDone: boolean }> {
+    await this.flushBeforeRead(ctx);
     const {
       page,
       cursor: newCursor,
diff --git a/src/component/_generated/component.ts b/src/component/_generated/component.ts
index d3e82f8..04406c0 100644
--- a/src/component/_generated/component.ts
+++ b/src/component/_generated/component.ts
@@ -170,6 +170,50 @@ export type ComponentApi =
     >;
   };
   public: {
+    batch: FunctionReference<
+      "mutation",
+      "internal",
+      {
+        operations: Array<
+          | {
+              key: any;
+              namespace?: any;
+              summand?: number;
+              type: "insert";
+              value: any;
+            }
+          | { key: any; namespace?: any; type: "delete" }
+          | {
+              currentKey: any;
+              namespace?: any;
+              newKey: any;
+              newNamespace?: any;
+              summand?: number;
+              type: "replace";
+              value: any;
+            }
+          | { key: any; namespace?: any; type: "deleteIfExists" }
+          | {
+              currentKey: any;
+              namespace?: any;
+              newKey: any;
+              newNamespace?: any;
+              summand?: number;
+              type: "replaceOrInsert";
+              value: any;
+            }
+          | {
+              key: any;
+              namespace?: any;
+              summand?: number;
+              type: "insertIfDoesNotExist";
+              value: any;
+            }
+        >;
+      },
+      null,
+      Name
+    >;
     clear: FunctionReference<
       "mutation",
       "internal",
diff --git a/src/component/btree.ts b/src/component/btree.ts
index e35f23f..d6f081c 100644
--- a/src/component/btree.ts
+++ b/src/component/btree.ts
@@ -45,13 +45,16 @@ function log(s: string) {
 export async function insertHandler(
   ctx: { db: DatabaseWriter },
   args: { key: Key; value: Value; summand?: number; namespace?: Namespace },
+  treeArg?: Doc<"btree">,
 ) {
-  const tree = await getOrCreateTree(
-    ctx.db,
-    args.namespace,
-    DEFAULT_MAX_NODE_SIZE,
-    true,
-  );
+  const tree =
+    treeArg ||
+    (await getOrCreateTree(
+      ctx.db,
+      args.namespace,
+      DEFAULT_MAX_NODE_SIZE,
+      true,
+    ));
   const summand = args.summand ?? 0;
   const pushUp = await insertIntoNode(ctx, args.namespace, tree.root, {
     k: args.key,
@@ -80,13 +83,16 @@ export async function insertHandler(
 export async function deleteHandler(
   ctx: { db: DatabaseWriter },
   args: { key: Key; namespace?: Namespace },
+  treeArg?: Doc<"btree">,
 ) {
-  const tree = await getOrCreateTree(
-    ctx.db,
-    args.namespace,
-    DEFAULT_MAX_NODE_SIZE,
-    true,
-  );
+  const tree =
+    treeArg ||
+    (await getOrCreateTree(
+      ctx.db,
+      args.namespace,
+      DEFAULT_MAX_NODE_SIZE,
+      true,
+    ));
   await deleteFromNode(ctx, args.namespace, tree.root, args.key);
   const root = (await ctx.db.get(tree.root))!;
   if (root.items.length === 0 && root.subtrees.length === 1) {
diff --git a/src/component/public.ts b/src/component/public.ts
index 3b7ff90..816531d 100644
--- a/src/component/public.ts
+++ b/src/component/public.ts
@@ -171,3 +171,198 @@ export const clear = mutation({
     );
   },
 });
+
+/**
+ * Batch mutation that processes multiple operations efficiently by fetching
+ * each namespace's tree once and passing it to the handlers.
+ */
+export const batch = mutation({
+  args: {
+    operations: v.array(
+      v.union(
+        v.object({
+          type: v.literal("insert"),
+          key: v.any(),
+          value: v.any(),
+          summand: v.optional(v.number()),
+          namespace: v.optional(v.any()),
+        }),
+        v.object({
+          type: v.literal("delete"),
+          key: v.any(),
+          namespace: v.optional(v.any()),
+        }),
+        v.object({
+          type: v.literal("replace"),
+          currentKey: v.any(),
+          newKey: v.any(),
+          value: v.any(),
+          summand: v.optional(v.number()),
+          namespace: v.optional(v.any()),
+          newNamespace: v.optional(v.any()),
+        }),
+        v.object({
+          type: v.literal("deleteIfExists"),
+          key: v.any(),
+          namespace: v.optional(v.any()),
+        }),
+        v.object({
+          type: v.literal("replaceOrInsert"),
+          currentKey: v.any(),
+          newKey: v.any(),
+          value: v.any(),
+          summand: v.optional(v.number()),
+          namespace: v.optional(v.any()),
+          newNamespace: v.optional(v.any()),
+        }),
+        v.object({
+          type: v.literal("insertIfDoesNotExist"),
+          key: v.any(),
+          value: v.any(),
+          summand: v.optional(v.number()),
+          namespace: v.optional(v.any()),
+        }),
+      ),
+    ),
+  },
+  returns: v.null(),
+  handler: async (ctx, { operations }) => {
+    // Group operations by namespace so each tree is fetched only once
+    const namespaceGroups = new Map<string, typeof operations>();
+    for (const op of operations) {
+      const namespace = "namespace" in op ? op.namespace : undefined;
+      // Use a sentinel value for the undefined namespace, since
+      // JSON.stringify(undefined) returns undefined
+      const key =
+        namespace === undefined ? "__undefined__" : JSON.stringify(namespace);
+      if (!namespaceGroups.has(key)) {
+        namespaceGroups.set(key, []);
+      }
+      namespaceGroups.get(key)!.push(op);
+    }
+
+    // Process each namespace group
+    for (const [namespaceKey, ops] of namespaceGroups.entries()) {
+      const namespace =
+        namespaceKey === "__undefined__" ? undefined : JSON.parse(namespaceKey);
+      const tree = await getOrCreateTree(
+        ctx.db,
+        namespace,
+        DEFAULT_MAX_NODE_SIZE,
+        true,
+      );
+
+      // Process operations in order
+      for (const op of ops) {
+        if (op.type === "insert") {
+          await insertHandler(
+            ctx,
+            {
+              key: op.key,
+              value: op.value,
+              summand: op.summand,
+              namespace: op.namespace,
+            },
+            tree,
+          );
+        } else if (op.type === "delete") {
+          await deleteHandler(
+            ctx,
+            {
+              key: op.key,
+              namespace: op.namespace,
+            },
+            tree,
+          );
+        } else if (op.type === "replace") {
+          await deleteHandler(
+            ctx,
+            {
+              key: op.currentKey,
+              namespace: op.namespace,
+            },
+            tree,
+          );
+          await insertHandler(
+            ctx,
+            {
+              key: op.newKey,
+              value: op.value,
+              summand: op.summand,
+              namespace: op.newNamespace,
+            },
+            // Only reuse the prefetched tree when the destination namespace is
+            // unchanged; a cross-namespace replace must look up its own tree.
+            op.newNamespace === op.namespace ? tree : undefined,
+          );
+        } else if (op.type === "deleteIfExists") {
+          try {
+            await deleteHandler(
+              ctx,
+              { key: op.key, namespace: op.namespace },
+              tree,
+            );
+          } catch (e) {
+            if (
+              e instanceof ConvexError &&
+              e.data?.code === "DELETE_MISSING_KEY"
+            ) {
+              continue;
+            }
+            throw e;
+          }
+        } else if (op.type === "replaceOrInsert") {
+          try {
+            await deleteHandler(
+              ctx,
+              {
+                key: op.currentKey,
+                namespace: op.namespace,
+              },
+              tree,
+            );
+          } catch (e) {
+            if (
+              !(e instanceof ConvexError && e.data?.code === "DELETE_MISSING_KEY")
+            ) {
+              throw e;
+            }
+          }
+          await insertHandler(
+            ctx,
+            {
+              key: op.newKey,
+              value: op.value,
+              summand: op.summand,
+              namespace: op.newNamespace,
+            },
+            op.newNamespace === op.namespace ? tree : undefined,
+          );
+        } else if (op.type === "insertIfDoesNotExist") {
+          // insertIfDoesNotExist is implemented as replaceOrInsert
+          try {
+            await deleteHandler(
+              ctx,
+              {
+                key: op.key,
+                namespace: op.namespace,
+              },
+              tree,
+            );
+          } catch (e) {
+            if (
+              !(e instanceof ConvexError && e.data?.code === "DELETE_MISSING_KEY")
+            ) {
+              throw e;
+            }
+          }
+          await insertHandler(
+            ctx,
+            {
+              key: op.key,
+              value: op.value,
+              summand: op.summand,
+              namespace: op.namespace,
+            },
+            tree,
+          );
+        }
+      }
+    }
+  },
+});
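
The example file above covers the happy paths; one pattern it does not show is cleanup on failure. Below is a minimal usage sketch of that pattern, using the same DirectAggregate and components.batchedWrites setup as example/convex/batchedWrites.ts (the function name safeBatchedWrites is hypothetical, not part of this diff). Because buffer() mutates the shared module-level instance, and a thrown error rolls back the database but not the client object's in-memory state, wrapping the buffered section in try/finally guarantees the instance is left unbuffered for the next mutation that touches it:

    import { DirectAggregate } from "@convex-dev/aggregate";
    import { mutation } from "./_generated/server";
    import { components } from "./_generated/api.js";
    import { v } from "convex/values";

    const aggregate = new DirectAggregate<{
      Key: number;
      Id: string;
    }>(components.batchedWrites);

    export const safeBatchedWrites = mutation({
      args: { count: v.number() },
      handler: async (ctx, { count }) => {
        aggregate.buffer(true);
        try {
          for (let i = 0; i < count; i++) {
            // Queued in memory; nothing is sent to the component yet.
            await aggregate.insert(ctx, { key: i, id: `safe-${i}` });
          }
          // One batch mutation for all queued operations.
          await aggregate.flush(ctx);
        } finally {
          // Always leave the shared instance unbuffered, even if an
          // insert or the flush itself throws.
          aggregate.buffer(false);
        }
        // Reads see the flushed writes as usual.
        return await aggregate.count(ctx);
      },
    });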