commit 267e3c15bdd22df3c9950871462c7af63610f79b Author: facebook-github-bot Date: Sun Jun 16 04:28:59 2019 -0700 Initial commit fbshipit-source-id: c79a38536e3c128dce1b2948615b72ec9779ed22 diff --git a/README.md b/README.md new file mode 100644 index 000000000..037478c97 --- /dev/null +++ b/README.md @@ -0,0 +1,172 @@ +## Ent + +> Note: if you edit this file, don't forget to update the [Wiki][wiki] as well. + +### First Installation + +If it is the first time you work with `entc`, you need to compile it manually, +since we don't have any official binary distribution. + +``` +cd fbc/ent/entc/cmd/entc +go build +sudo mv entc /usr/local/bin +``` + +### Creating Schema +If you came here to see how to create your first schema, it's preferred to give `entc` +to do it for you in order to keep the same standard for all projects that use it. +Run the following (replace `User/Group` with your entities): + +``` +entc init User Group +``` + +The schema that was created has 2 methods: `User.Fields` and `User.Edges`. The first defines the fields/properties +of the entity in the graph, and the second defines the edges for other entities (or to itself) in the graph. + +Here are a few examples that will help you to understand what field/edge to declare in your schema +``` +type User struct{ + ent.Schema +} + +func (User) Fields() []ent.Field { + return []ent.Field{ + // "age" defines a field of type int, with a positive validator. + // The validator is called on create or update on this field. + field.Int("age"). + Positive(), + + // "name" defines a field of type string, and overrides the standard + // tag for the generated entity. + field.String("name"). + StructTag(`json:"first_name" graphql:"first_name"`), + + // "last" defines a field of type string, with default (on creation) to "unknown", + // and 2 validators. + field.String("last"). + Default("unknown"). + Match(regexp.MustCompile("[a-zA-Z_]+$")). 
+ Validate(func(s string) error { + if strings.ToLower(s) == s { + return errors.New("last name must begin with uppercase") + } + return nil + }), + } +} + +func (User) Edges() []ent.Edge { + return []ent.Edge{ + // "groups" defines an edge to the Group entity (also a schema in this package). + // The relation type for this edge is many-2-many. + edge.To("groups", Group.Type), + + // "workplace" defines an edge from the Company entity (also a schema in this package). + // The relation type for this edge is many-2-one, and the owner of this edge, is the + // Company entity. + edge.From("workplace", Company.Type).Unique().Ref("employees"), + + // "parent" defines an edge from a User to itself. + edge.To("parent", User.Type).Unique().From("children"), + } +} +``` + +### Code Generation + +After running init, run the codegen on the directory the was created (`ent/schema`). + +``` +entc generate ./ent/schema +``` + +In addition to the "production" code, `entc` generates for you also an `example_test.go` file +with example for each ent in the graph. + +### Working with the generated code + +First, you need to create the `ent.Client` in order to interact with the different builders, +then, use this client to create, update, delete, or query entities. + +``` +package main + +import ( + "log" + + "/ent" + "/ent/user" + "fbc/ent/dialect/sql" +) + +func main() { + ctx := context.Backgorund() + drv, err := sql.Open("mysql", "root:pass@tcp(localhost:3306)/test?charset=utf8&parseTime=True") + if err != nil { + log.Fatal(err) + } + defer drv.Close() + client := ent.NewClient(drv) + + // Create: + + // `client.User` holds the `UserClient`, and `client.User.Create()` returns a new User creator. + a8m, err := client.User. + Create(). + SetAge(30) + SetName("a8m"). + Save(ctx) + if err != nil { + log.Fatal(err) + } + // If you want to ignore the error checks in the code, replace `Save` with `SaveX`. + + // Delete: + + // delete one. 
+ client.User.DeleteOne(a8m).ExecX(ctx) + // delete all. + client.User.Delete().ExecX(ctx) + // delete with condition. + client.User.Delete().Where(user.Name("a8m")).ExecX(ctx) + + // Update: + + // add a user to a group. + a8m = client.User.UpdateOne(a8m).AddGroups(grp).SaveX(ctx) + // delete a user from a group. + a8m = client.User.UpdateOne(a8m).RemoveGroups(grp).SaveX(ctx) + // add user to all groups. + client.Group.Update().AddUsers(a8m).ExecX(ctx) + + // Query: + + // get all groups. + groups := client.Group.Query().AllX(ctx) + // get all groups of a specific user. + groups = a8m.QueryGroups().AllX(ctx) + // query by path. + users := client.Group. + Query(). + Where(group.HasUsers(), group.NameHasPrefix("fb")). + QueryUsers(). + AllX(ctx) + + // Aggregation: + + var v []struct{ + Name string `json:"name"` + Count int `json:"count"` + } + client.User. + Query(). + GroupBy(user.FieldName). + Aggregate(ent.Count()). + ScanX(&v) +} +``` + + +[wiki]: https://our.internmc.facebook.com/intern/wiki/Facebook_Connectivity_(FBC)/Entity_Framework/ diff --git a/dialect/dialect.go b/dialect/dialect.go new file mode 100644 index 000000000..cf6f72e82 --- /dev/null +++ b/dialect/dialect.go @@ -0,0 +1,115 @@ +package dialect + +import ( + "context" + "database/sql/driver" + "fmt" + "log" + + "github.com/google/uuid" +) + +// Dialect names for external usage. +const ( + MySQL = "mysql" + SQLite = "sqlite3" + Neptune = "neptune" +) + +// ExecQuerier wraps the 2 database operations. +type ExecQuerier interface { + // Exec executes a query that doesn't return rows. For example, in SQL, INSERT or UPDATE. + // It scans the result into the pointer v. In SQL, you it's usually sql.Result. + Exec(ctx context.Context, query string, args interface{}, v interface{}) error + // Query executes a query that returns rows, typically a SELECT in SQL. + // It scans the result into the pointer v. In SQL, you it's usually *sql.Rows. 
+ Query(ctx context.Context, query string, args interface{}, v interface{}) error +} + +// Driver is the interface that wraps all necessary operations for ent clients. +type Driver interface { + ExecQuerier + // Tx starts and returns a new transaction. + // The provided context is used until the transaction is committed or rolled back. + Tx(context.Context) (Tx, error) + // Close closes the underlying connection. + Close() error + // Dialect returns the dialect name of the driver. + Dialect() string +} + +// Tx wraps the Exec and Query operations in transaction. +type Tx interface { + ExecQuerier + driver.Tx +} + +// DebugDriver is a driver that logs all driver operations. +type DebugDriver struct { + Driver // underlying driver. + log func(...interface{}) // log function. defaults to log.Println. +} + +// Debug gets a driver and an optional logging function, and returns +// a new debugged-driver that prints all outgoing operations. +func Debug(d Driver, logger ...func(...interface{})) Driver { + drv := &DebugDriver{d, log.Println} + if len(logger) == 1 { + drv.log = logger[0] + } + return drv +} + +// Exec logs its params and calls the underlying driver Exec method. +func (d *DebugDriver) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + d.log(fmt.Sprintf("driver.Exec: query=%v args=%v", query, args)) + return d.Driver.Exec(ctx, query, args, v) +} + +// Query logs its params and calls the underlying driver Query method. +func (d *DebugDriver) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + d.log(fmt.Sprintf("driver.Query: query=%v args=%v", query, args)) + return d.Driver.Query(ctx, query, args, v) +} + +// Tx adds an log-id for the transaction and calls the underlying driver Tx command. 
+func (d *DebugDriver) Tx(ctx context.Context) (Tx, error) { + tx, err := d.Driver.Tx(ctx) + if err != nil { + return nil, err + } + id := uuid.New().String() + d.log(fmt.Sprintf("driver.Tx(%s): started", id)) + return &DebugTx{tx, id, d.log}, nil +} + +// DebugTx is a driver that logs all transaction operations. +type DebugTx struct { + Tx // underlying transaction. + id string // transaction logging id. + log func(...interface{}) // log function. defaults to fmt.Println. +} + +// Exec logs its params and calls the underlying transaction Exec method. +func (d *DebugTx) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + d.log(fmt.Sprintf("Tx(%s).Exec: query=%v args=%v", d.id, query, args)) + return d.Tx.Exec(ctx, query, args, v) +} + +// Query logs its params and calls the underlying transaction Query method. +func (d *DebugTx) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + d.log(fmt.Sprintf("Tx(%s).Query: query=%v args=%v", d.id, query, args)) + return d.Tx.Query(ctx, query, args, v) +} + +// Commit logs this step and calls the underlying transaction Commit method. +func (d *DebugTx) Commit() error { + d.log(fmt.Sprintf("Tx(%s): committed", d.id)) + return d.Tx.Commit() +} + +// Rollback logs this step and calls the underlying transaction Rollback method. +func (d *DebugTx) Rollback() error { + d.log(fmt.Sprintf("Tx(%s): rollbacked", d.id)) + return d.Tx.Rollback() +} diff --git a/dialect/gremlin.go b/dialect/gremlin.go new file mode 100644 index 000000000..08a41b60c --- /dev/null +++ b/dialect/gremlin.go @@ -0,0 +1,60 @@ +package dialect + +import ( + "context" + "fmt" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" +) + +// Gremlin is a dialect.Client implementation for TinkerPop gremlin. +type Gremlin struct { + *gremlin.Client +} + +// NewGremlin returns a new dialect.Gremlin implementation for gremlin. 
+func NewGremlin(c *gremlin.Client) *Gremlin { + c.Transport = gremlin.ExpandBindings(c.Transport) + return &Gremlin{c} +} + +// Dialect implements the dialect.Dialect method. +func (Gremlin) Dialect() string { return Neptune } + +// Exec implements the dialect.Exec method. +func (c *Gremlin) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + vr, ok := v.(*gremlin.Response) + if !ok { + return fmt.Errorf("dialect/gremlin: invalid type %T. expect *gremlin.Response", v) + } + bindings, ok := args.(dsl.Bindings) + if !ok { + return fmt.Errorf("dialect/gremlin: invalid type %T. expect map[string]interface{} for bindings", args) + } + res, err := c.Do(ctx, gremlin.NewEvalRequest(query, gremlin.WithBindings(bindings))) + if err != nil { + return err + } + *vr = *res + return nil +} + +// Query implements the dialect.Query method. +func (c *Gremlin) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + return c.Exec(ctx, query, args, v) +} + +// Close is a nop close call. It should close the connection in case of WS client. +func (c *Gremlin) Close() error { return nil } + +// Tx returns a nop transaction. +func (c *Gremlin) Tx(context.Context) (Tx, error) { return c, nil } + +// Commit is a nop commit. +func (c *Gremlin) Commit() error { return nil } + +// Rollback is a nop rollback. +func (c *Gremlin) Rollback() error { return nil } + +var _ Driver = (*Gremlin)(nil) diff --git a/dialect/sql/builder.go b/dialect/sql/builder.go new file mode 100644 index 000000000..1e1c78ea2 --- /dev/null +++ b/dialect/sql/builder.go @@ -0,0 +1,1309 @@ +package sql + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "fbc/ent/dialect" +) + +// Node represents a builder step in the query. +type Node interface { + // Query returns the query representation of the element and its arguments (if any). + Query() (string, []interface{}) +} + +// Nodes are list of queries join with space between them. 
+type Nodes []Node + +// Query returns query representation of Nodes. +func (n Nodes) Query() (string, []interface{}) { + b := Builder{} + for i := range n { + if i > 0 { + b.Pad() + } + query, args := n[i].Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + return b.String(), b.args +} + +// Builder is a query builder for the sql dsl. +type Builder struct { + bytes.Buffer + args []interface{} +} + +// Append appends the given string as a quoted parameter +func (b *Builder) Append(s string) *Builder { + switch { + case len(s) == 0: + case s != "*" && s[0] != '`' && !isFunc(s) && !isModifier(s): + fmt.Fprintf(b, "`%s`", s) + default: + b.WriteString(s) + } + return b +} + +// AppendComma appends calls Append on all arguments and adds a comma between them. +func (b *Builder) AppendComma(s ...string) *Builder { + for i := range s { + if i > 0 { + b.Comma() + } + b.Append(s[i]) + } + return b +} + +// Arg appends an argument to the builder. +func (b *Builder) Arg(a interface{}) *Builder { + b.WriteString("?") + b.args = append(b.args, a) + return b +} + +// Args appends a list of arguments to the builder. +func (b *Builder) Args(a ...interface{}) *Builder { + for i := range a { + if i > 0 { + b.Comma() + } + b.Arg(a[i]) + } + return b +} + +// Comma adds a comma to the query. +func (b *Builder) Comma() *Builder { + b.WriteString(", ") + return b +} + +// Pad adds a space to the query. +func (b *Builder) Pad() *Builder { + b.WriteString(" ") + return b +} + +// Join joins a list of Nodes to the builder. +func (b *Builder) Join(n ...Node) *Builder { + for i := range n { + query, args := n[i].Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + return b +} + +// JoinComma joins a list of Nodes and adds comma between them. +func (b *Builder) JoinComma(n ...Node) *Builder { + for i := range n { + if i > 0 { + b.Comma() + } + query, args := n[i].Query() + b.WriteString(query) + b.args = append(b.args, args...) 
+ } + return b +} + +// Nested gets a callback, and wraps its result with parentheses. +func (b *Builder) Nested(f func(*Builder)) *Builder { + nb := &Builder{} + nb.WriteString("(") + f(nb) + nb.WriteString(")") + nb.WriteTo(b) + b.args = append(b.args, nb.args...) + return b +} + +// ColumnBuilder is a builder for column definition in table creation. +type ColumnBuilder struct { + b Builder + typ string // column type. + name string // column name. + attr string // extra attributes. +} + +// Column returns a new ColumnBuilder with the given name. +// +// sql.Column("group_id").Type("int").Attr("UNIQUE") +// +func Column(name string) *ColumnBuilder { return &ColumnBuilder{name: name} } + +// Type sets the column type. +func (c *ColumnBuilder) Type(t string) *ColumnBuilder { + c.typ = t + return c +} + +// Attr sets an extra attribute for the column, like UNIQUE or AUTO_INCREMENT. +func (c *ColumnBuilder) Attr(a string) *ColumnBuilder { + if c.attr != "" && a != "" { + c.attr += " " + } + c.attr += a + return c +} + +// Query returns query representation of a Column. +func (c *ColumnBuilder) Query() (string, []interface{}) { + c.b.Append(c.name).Pad().WriteString(c.typ) + if c.attr != "" { + c.b.Pad().WriteString(c.attr) + } + return c.b.String(), c.b.args +} + +// TableBuilder is a query builder for `CREATE TABLE` statement. +type TableBuilder struct { + b Builder + name string // table name. + exists bool // check existence. + columns []*ColumnBuilder // table columns. + primary []string // primary key. + constraints []Node // foreign keys and indices. +} + +// CreateTable returns a query builder for the `CREATE TABLE` statement. +// +// CreateTable("users"). +// Columns( +// Column("id").Type("int").Attr("auto_increment"), +// Column("name").Type("varchar(255)"), +// ). 
+// PrimaryKey("id") +// +func CreateTable(name string) *TableBuilder { return &TableBuilder{b: Builder{}, name: name} } + +// IfNotExists appends the `IF NOT EXISTS` clause to the `CREATE TABLE` statement. +func (t *TableBuilder) IfNotExists() *TableBuilder { + t.exists = true + return t +} + +// Column appends the given column to the `CREATE TABLE` statement. +func (t *TableBuilder) Column(c *ColumnBuilder) *TableBuilder { + t.columns = append(t.columns, c) + return t +} + +// Columns appends the a list of columns to the builder. +func (t *TableBuilder) Columns(c ...*ColumnBuilder) *TableBuilder { + t.columns = append(t.columns, c...) + return t +} + +// PrimaryKey adds a column to the primary-key constraint in the statement. +func (t *TableBuilder) PrimaryKey(column ...string) *TableBuilder { + t.primary = append(t.primary, column...) + return t +} + +// ForeignKeys adds a list of foreign-keys to the statement (without constraints). +func (t *TableBuilder) ForeignKeys(fks ...*ForeignKeyBuilder) *TableBuilder { + nodes := make([]Node, len(fks)) + for i := range fks { + // erase the constraint symbol/name. + fks[i].symbol = "" + nodes[i] = fks[i] + } + t.constraints = append(t.constraints, nodes...) + return t +} + +// Constraints adds a list of foreign-key constraints to the statement. +func (t *TableBuilder) Constraints(fks ...*ForeignKeyBuilder) *TableBuilder { + nodes := make([]Node, len(fks)) + for i := range fks { + nodes[i] = &Wrapper{"CONSTRAINT %s", fks[i]} + } + t.constraints = append(t.constraints, nodes...) + return t +} + +// Query returns query representation of a `CREATE TABLE` statement. 
+func (t *TableBuilder) Query() (string, []interface{}) { + t.b.WriteString("CREATE TABLE ") + if t.exists { + t.b.WriteString("IF NOT EXISTS ") + } + t.b.Append(t.name) + t.b.Nested(func(b *Builder) { + for i, c := range t.columns { + if i > 0 { + b.Comma() + } + b.Join(c) + } + if len(t.primary) > 0 { + b.Comma().WriteString("PRIMARY KEY") + b.Nested(func(b *Builder) { + b.AppendComma(t.primary...) + }) + } + if len(t.constraints) > 0 { + b.Comma().JoinComma(t.constraints...) + } + }) + return t.b.String(), t.b.args +} + +// TableAlter is a query builder for `ALTER TABLE` statement. +type TableAlter struct { + b Builder + name string // table to alter. + nodes []Node // columns and foreign-keys to add. +} + +// AlterTable returns a query builder for the `ALTER TABLE` statement. +// +// AlterTable("users"). +// AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). +// AddForeignKey(ForeignKey().Columns("group_id"). Reference(Reference().Table("groups").Columns("id")).OnDelete("CASCADE")) +// +func AlterTable(name string) *TableAlter { return &TableAlter{b: Builder{}, name: name} } + +// AddColumn appends the `ADD COLUMN` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) AddColumn(c *ColumnBuilder) *TableAlter { + t.nodes = append(t.nodes, &Wrapper{"ADD %s", c}) + return t +} + +// AddForeignKey adds a foreign key constraint to the `ALTER TABLE` statement. +func (t *TableAlter) AddForeignKey(fk *ForeignKeyBuilder) *TableAlter { + t.nodes = append(t.nodes, &Wrapper{"ADD CONSTRAINT %s", fk}) + return t +} + +// Query returns query representation of the `ALTER TABLE` statement. +func (t *TableAlter) Query() (string, []interface{}) { + t.b.WriteString("ALTER TABLE ") + t.b.Append(t.name) + t.b.Pad() + t.b.JoinComma(t.nodes...) + return t.b.String(), t.b.args +} + +// ForeignKeyBuilder is the builder for the foreign-key constraint clause. 
+type ForeignKeyBuilder struct { + b Builder + symbol string + columns []string + actions []string + ref *ReferenceBuilder +} + +// ForeignKey returns a builder for the foreign-key constraint clause in create/alter table statements. +// ForeignKey(). +// Columns("group_id"). +// Reference(Reference().Table("groups").Columns("id")). +// OnDelete("CASCADE") +// +func ForeignKey(symbol ...string) *ForeignKeyBuilder { + fk := &ForeignKeyBuilder{} + if len(symbol) != 0 { + fk.symbol = symbol[0] + } + return fk +} + +// Symbol sets the symbol of the foreign key. +func (fk *ForeignKeyBuilder) Symbol(s string) *ForeignKeyBuilder { + fk.symbol = s + return fk +} + +// Columns sets the columns of the foreign key in the source table. +func (fk *ForeignKeyBuilder) Columns(s ...string) *ForeignKeyBuilder { + fk.columns = append(fk.columns, s...) + return fk +} + +// Reference sets the reference clause. +func (fk *ForeignKeyBuilder) Reference(r *ReferenceBuilder) *ForeignKeyBuilder { + fk.ref = r + return fk +} + +// OnDelete sets the on delete action for this constraint. +func (fk *ForeignKeyBuilder) OnDelete(action string) *ForeignKeyBuilder { + fk.actions = append(fk.actions, "ON DELETE "+action) + return fk +} + +// OnUpdate sets the on delete action for this constraint. +func (fk *ForeignKeyBuilder) OnUpdate(action string) *ForeignKeyBuilder { + fk.actions = append(fk.actions, "ON UPDATE "+action) + return fk +} + +// Query returns query representation of a foreign key constraint. +func (fk *ForeignKeyBuilder) Query() (string, []interface{}) { + if fk.symbol != "" { + fk.b.Append(fk.symbol) + fk.b.Pad() + } + fk.b.WriteString("FOREIGN KEY") + fk.b.Nested(func(b *Builder) { + b.AppendComma(fk.columns...) + }) + fk.b.Pad() + fk.b.Join(fk.ref) + for _, action := range fk.actions { + fk.b.Pad().WriteString(action) + } + return fk.b.String(), fk.b.args +} + +// ReferenceBuilder is a builder for the reference clause in constraints. For example, in foreign key creation. 
+type ReferenceBuilder struct { + b Builder + table string // referenced table. + columns []string // referenced columns. + actions []string // reference actions. +} + +// Reference create a reference builder for the reference_option clause. +// +// Reference().Table("groups").Columns("id") +// +func Reference() *ReferenceBuilder { return &ReferenceBuilder{} } + +// Table sets the referenced table. +func (r *ReferenceBuilder) Table(s string) *ReferenceBuilder { + r.table = s + return r +} + +// Columns sets the columns of the referenced table. +func (r *ReferenceBuilder) Columns(s ...string) *ReferenceBuilder { + r.columns = append(r.columns, s...) + return r +} + +// Query returns query representation of a reference clause. +func (r *ReferenceBuilder) Query() (string, []interface{}) { + r.b.WriteString("REFERENCES ") + r.b.Append(r.table) + r.b.Nested(func(b *Builder) { + b.AppendComma(r.columns...) + }) + return r.b.String(), r.b.args +} + +// InsertBuilder is a builder for `INSERT INTO` statement. +type InsertBuilder struct { + b Builder + table string + columns []string + defaults string + values [][]interface{} +} + +// Insert creates a builder for the `INSERT INTO` statement. +// +// Insert("users"). +// Columns("name", "age"). +// Values("a8m", 10). +// Values("foo", 20) +// +// Note: Insert inserts all values in one batch. +func Insert(table string) *InsertBuilder { return &InsertBuilder{table: table} } + +// Set is a syntactic sugar API for inserting only one row. +func (i *InsertBuilder) Set(column string, v interface{}) *InsertBuilder { + i.columns = append(i.columns, column) + if len(i.values) == 0 { + i.values = append(i.values, []interface{}{v}) + } else { + i.values[0] = append(i.values[0], v) + } + return i +} + +// Columns sets the columns of the insert statement. +func (i *InsertBuilder) Columns(columns ...string) *InsertBuilder { + i.columns = append(i.columns, columns...) + return i +} + +// Values append a value tuple for the insert statement. 
+func (i *InsertBuilder) Values(values ...interface{}) *InsertBuilder { + i.values = append(i.values, values) + return i +} + +// Default sets the default values clause based on the dialect type. +func (i *InsertBuilder) Default(d string) *InsertBuilder { + switch d { + case dialect.MySQL: + i.defaults = "VALUES ()" + case dialect.SQLite: + i.defaults = "DEFAULT VALUES" + } + return i +} + +// Query returns query representation of an `INSERT INTO` statement. +func (i *InsertBuilder) Query() (string, []interface{}) { + i.b.WriteString("INSERT INTO ") + if i.defaults != "" && len(i.columns) == 0 { + return i.b.Append(i.table).Pad().String() + i.defaults, nil + } + i.b.Append(i.table).Pad().Nested(func(b *Builder) { + b.AppendComma(i.columns...) + }) + i.b.WriteString(" VALUES ") + for j, v := range i.values { + if j > 0 { + i.b.Comma() + } + i.b.Nested(func(b *Builder) { + b.Args(v...) + }) + } + return i.b.String(), i.b.args +} + +// UpdateBuilder is a builder for `UPDATE` statement. +type UpdateBuilder struct { + b Builder + table string + where *Predicate + nulls []string + columns []string + values []interface{} +} + +// Update creates a builder for the `UPDATE` statement. +// +// Update("users").Set("name", "foo").Set("age", 10) +// +func Update(table string) *UpdateBuilder { return &UpdateBuilder{table: table} } + +// Set sets a column and a its value. +func (u *UpdateBuilder) Set(column string, v interface{}) *UpdateBuilder { + u.columns = append(u.columns, column) + u.values = append(u.values, v) + return u +} + +// SetNull sets a column as null value. +func (u *UpdateBuilder) SetNull(column string) *UpdateBuilder { + u.nulls = append(u.nulls, column) + return u +} + +// Where adds a where predicate for update statement. +func (u *UpdateBuilder) Where(p *Predicate) *UpdateBuilder { + if u.where != nil { + u.where.merge(p) + } else { + u.where = p + } + return u +} + +// Query returns query representation of an `UPDATE` statement. 
+func (u *UpdateBuilder) Query() (string, []interface{}) { + u.b.WriteString("UPDATE ") + u.b.Append(u.table).Pad().WriteString("SET ") + for j, c := range u.nulls { + if j > 0 { + u.b.Comma() + } + u.b.Append(c).WriteString(" = NULL") + } + if len(u.nulls) > 0 && len(u.columns) > 0 { + u.b.Comma() + } + for j, c := range u.columns { + if j > 0 { + u.b.Comma() + } + u.b.Append(c).WriteString(" = ?") + } + u.b.args = append(u.b.args, u.values...) + if u.where != nil { + u.b.WriteString(" WHERE ") + u.b.Join(u.where) + } + return u.b.String(), u.b.args +} + +// DeleteBuilder is a builder for `DELETE` statement. +type DeleteBuilder struct { + b Builder + table string + where *Predicate +} + +// Delete creates a builder for the `DELETE` statement. +// +// Delete("users"). +// Where( +// Or( +// EQ("name", "foo").And().EQ("age", 10), +// EQ("name", "bar").And().EQ("age", 20), +// And( +// EQ("name", "qux"), +// EQ("age", 1).Or().EQ("age", 2), +// ), +// ), +// ) +// +func Delete(table string) *DeleteBuilder { return &DeleteBuilder{table: table} } + +// Where appends a where predicate to the `DELETE` statement. +func (d *DeleteBuilder) Where(p *Predicate) *DeleteBuilder { + if d.where != nil { + d.where.merge(p) + } else { + d.where = p + } + return d +} + +// FromSelect make it possible to delete a sub query. +func (d *DeleteBuilder) FromSelect(s *Selector) *DeleteBuilder { + d.Where(s.where) + if s.from != nil { + d.table = s.from.name + } + return d +} + +// Query returns query representation of a `DELETE` statement. +func (d *DeleteBuilder) Query() (string, []interface{}) { + d.b.WriteString("DELETE FROM ") + d.b.Append(d.table) + if d.where != nil { + d.b.WriteString(" WHERE ") + d.b.Join(d.where) + } + return d.b.String(), d.b.args +} + +// Predicate is a where predicate. +type Predicate struct { + b Builder +} + +// P creates a new predicates. 
+// +// P().EQ("name", "a8m").And().EQ("age", 30) +// +func P() *Predicate { return &Predicate{} } + +// Or combines all given predicates with OR between them. +// +// Or(EQ("name", "foo"), EQ("name", "bar")) +// +func Or(preds ...*Predicate) *Predicate { + p := P() + for i := range preds { + p.Or().b.Nested(func(b *Builder) { + b.Join(preds[i]) + }) + } + return p +} + +// Or appends an OR only if it's not a start of expression. +func (p *Predicate) Or() *Predicate { + if p.b.Len() > 0 { + p.b.WriteString(" OR ") + } + return p +} + +// False appends the FALSE keyword to the predicate. +// +// Delete().From("users").Where(False()) +// +func False() *Predicate { + return (&Predicate{}).False() +} + +// False appends FALSE to the predicate. +func (p *Predicate) False() *Predicate { + p.b.WriteString("FALSE") + return p +} + +// Not wraps the given predicate with the not predicate. +// +// Not(Or(EQ("name", "foo"), EQ("name", "bar"))) +// +func Not(pred *Predicate) *Predicate { + p := P() + p.Not().b.Nested(func(b *Builder) { + b.Join(pred) + }) + return p +} + +// Not appends NOT to the predicate. +func (p *Predicate) Not() *Predicate { + p.b.WriteString("NOT ") + return p +} + +// And combines all given predicates with AND between them. +func And(preds ...*Predicate) *Predicate { + p := P() + for i := range preds { + p.And().b.Nested(func(b *Builder) { + b.Join(preds[i]) + }) + } + return p +} + +// And appends And only if it's not a start of expression. +func (p *Predicate) And() *Predicate { + if p.b.Len() > 0 { + p.b.WriteString(" AND ") + } + return p +} + +// EQ returns a "=" predicate. +func EQ(col string, value interface{}) *Predicate { + return (&Predicate{}).EQ(col, value) +} + +// EQ appends a "=" predicate. +func (p *Predicate) EQ(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" = ") + p.b.Arg(arg) + return p +} + +// NEQ returns a "<>" predicate. 
+func NEQ(col string, value interface{}) *Predicate { + return (&Predicate{}).NEQ(col, value) +} + +// NEQ appends a "<>" predicate. +func (p *Predicate) NEQ(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" <> ") + p.b.Arg(arg) + return p +} + +// LT returns a "<" predicate. +func LT(col string, value interface{}) *Predicate { + return (&Predicate{}).LT(col, value) +} + +// LT appends a "<" predicate. +func (p *Predicate) LT(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" < ") + p.b.Arg(arg) + return p +} + +// LTE returns a "<=" predicate. +func LTE(col string, value interface{}) *Predicate { + return (&Predicate{}).LTE(col, value) +} + +// LTE appends a "<=" predicate. +func (p *Predicate) LTE(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" <= ") + p.b.Arg(arg) + return p +} + +// GT returns a ">" predicate. +func GT(col string, value interface{}) *Predicate { + return (&Predicate{}).GT(col, value) +} + +// GT appends a ">" predicate. +func (p *Predicate) GT(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" > ") + p.b.Arg(arg) + return p +} + +// GTE returns a ">=" predicate. +func GTE(col string, value interface{}) *Predicate { + return (&Predicate{}).GTE(col, value) +} + +// GTE appends a ">=" predicate. +func (p *Predicate) GTE(col string, arg interface{}) *Predicate { + p.b.Append(col).WriteString(" >= ") + p.b.Arg(arg) + return p +} + +// NotNull returns the `IS NOT NULL` predicate. +func NotNull(col string) *Predicate { + return (&Predicate{}).NotNull(col) +} + +// NotNull appends the `IS NOT NULL` predicate. +func (p *Predicate) NotNull(col string) *Predicate { + p.b.Append(col).WriteString(" IS NOT NULL") + return p +} + +// IsNull returns the `IS NULL` predicate. +func IsNull(col string) *Predicate { + return (&Predicate{}).IsNull(col) +} + +// IsNull appends the `IS NULL` predicate. 
+func (p *Predicate) IsNull(col string) *Predicate { + p.b.Append(col).WriteString(" IS NULL") + return p +} + +// In returns the `IN` predicate. +func In(col string, args ...interface{}) *Predicate { + return (&Predicate{}).In(col, args...) +} + +// In appends the `IN` predicate. +func (p *Predicate) In(col string, args ...interface{}) *Predicate { + if len(args) == 0 { + return p + } + p.b.Append(col).WriteString(" IN ") + p.b.Nested(func(b *Builder) { + if s, ok := args[0].(*Selector); ok { + b.Join(s) + } else { + b.Args(args...) + } + }) + return p +} + +// InInts returns the `IN` predicate for ints. +func InInts(col string, args ...int) *Predicate { + return (&Predicate{}).InInts(col, args...) +} + +// InInts adds the `IN` predicate for ints. +func (p *Predicate) InInts(col string, args ...int) *Predicate { + iface := make([]interface{}, len(args)) + for i := range args { + iface[i] = args[i] + } + return p.In(col, iface...) +} + +// NotIn returns the `Not IN` predicate. +func NotIn(col string, args ...interface{}) *Predicate { + return (&Predicate{}).NotIn(col, args...) +} + +// NotIn appends the `Not IN` predicate. +func (p *Predicate) NotIn(col string, args ...interface{}) *Predicate { + p.b.Append(col).WriteString(" NOT IN ") + p.b.Nested(func(b *Builder) { + b.Args(args...) + }) + return p +} + +// Like returns the `LIKE` predicate. +func Like(col, pattern string) *Predicate { + return (&Predicate{}).Like(col, pattern) +} + +// Like appends the `LIKE` predicate. +func (p *Predicate) Like(col, pattern string) *Predicate { + p.b.Append(col).WriteString(" LIKE ") + p.b.Arg(pattern) + return p +} + +// HasPrefix is an helper predicate that checks prefix using the LIKE predicate. +func HasPrefix(col, prefix string) *Predicate { + return (&Predicate{}).HasPrefix(col, prefix) +} + +// HasPrefix is an helper predicate that checks prefix using the LIKE predicate. 
func (p *Predicate) HasPrefix(col, prefix string) *Predicate {
	return p.Like(col, prefix+"%")
}

// HasSuffix is a helper predicate that checks suffix using the LIKE predicate.
func HasSuffix(col, suffix string) *Predicate { return (&Predicate{}).HasSuffix(col, suffix) }

// HasSuffix is a helper predicate that checks suffix using the LIKE predicate.
func (p *Predicate) HasSuffix(col, suffix string) *Predicate {
	return p.Like(col, "%"+suffix)
}

// Contains is a helper predicate that checks substring using the LIKE predicate.
func Contains(col, sub string) *Predicate { return (&Predicate{}).Contains(col, sub) }

// Contains is a helper predicate that checks substring using the LIKE predicate.
func (p *Predicate) Contains(col, sub string) *Predicate {
	return p.Like(col, "%"+sub+"%")
}

// Query returns query representation of a predicate.
// Note that it exposes the builder's internal state (the args slice
// is not copied), so the predicate should not be mutated afterwards.
func (p *Predicate) Query() (string, []interface{}) {
	return p.b.String(), p.b.args
}

// merge conjoins pred into p: it calls And() on p, appends pred's
// rendered query text, and copies pred's bound arguments.
func (p *Predicate) merge(pred *Predicate) *Predicate {
	query, args := pred.Query()
	p.And().b.WriteString(query)
	p.b.args = append(p.b.args, args...)
	return p
}

// TableView is a view that returns a table view. Can be a Table, Selector or a View (WITH statement).
type TableView interface {
	view()
}

// Count wraps the column with the COUNT aggregation function.
func Count(column string) string {
	return agg("COUNT", column)
}

// Max wraps the column with the MAX aggregation function.
func Max(column string) string {
	return agg("MAX", column)
}

// Min wraps the column with the MIN aggregation function.
func Min(column string) string {
	return agg("MIN", column)
}

// Sum wraps the column with the SUM aggregation function.
func Sum(column string) string {
	return agg("SUM", column)
}

// Avg wraps the column with the AVG aggregation function.
+func Avg(column string) string { + return agg("AVG", column) +} + +// As suffixed the given column with an alias (`a` AS `b`). +func As(column string, as string) string { + b := Builder{} + b.Append(column).Pad().WriteString("AS") + b.Pad().Append(as) + return b.String() +} + +// Distinct prefixed the given columns with the `DISTINCT` keyword (DISTINCT `id`). +func Distinct(columns ...string) string { + b := Builder{} + b.WriteString("DISTINCT") + b.Pad().AppendComma(columns...) + return b.String() +} + +// SelectTable is a table selector. +type SelectTable struct { + name string + as string +} + +// Table returns a new table selector. +// +// t1 := Table("users").As("u") +// return Select(t1.C("name")) +// +func Table(name string) *SelectTable { + return &SelectTable{name: name} +} + +// As adds the AS clause to the table selector. +func (s *SelectTable) As(alias string) *SelectTable { + s.as = alias + return s +} + +// C returns a formatted string for the table column. +func (s *SelectTable) C(column string) string { + name := s.name + if s.as != "" { + name = s.as + } + return fmt.Sprintf("`%s`.`%s`", name, column) +} + +// Columns returns a list of formatted strings for the table columns. +func (s *SelectTable) Columns(columns ...string) []string { + var names []string + for _, c := range columns { + names = append(names, s.C(c)) + } + return names +} + +// ref returns the table reference. +func (s *SelectTable) ref() string { + if s.as == "" { + return fmt.Sprintf("`%s`", s.name) + } + return fmt.Sprintf("`%s` AS `%s`", s.name, s.as) +} + +// implement the table view. +func (*SelectTable) view() {} + +// join table option. +type join struct { + on string + kind string + table TableView +} + +// Selector a builder for the `SELECT` statement. 
+type Selector struct { + as string + columns []string + from *SelectTable + joins []join + where *Predicate + or bool + not bool + order []string + group []string + having *Predicate + limit *int + offset *int + distinct bool +} + +// Select returns a new selector for the `SELECT` statement. +// +// t1 := Table("users").As("u") +// t2 := Select().From(Table("groups")).Where(EQ("user_id", 10)).As("g") +// return Select(t1.C("id"), t2.C("name")). +// From(t1). +// Join(t2). +// On(t1.C("id"), t2.C("user_id")) +// +func Select(columns ...string) *Selector { + return (&Selector{}).Select(columns...) +} + +// Select changes the columns selection of the SELECT statement. +// Empty selection means all columns *. +func (s *Selector) Select(columns ...string) *Selector { + s.columns = columns + return s +} + +// From sets the source of `FORM` clause. +func (s *Selector) From(t *SelectTable) *Selector { + s.from = t + return s +} + +// Distinct adds the DISTINCT keyword to the `SELECT` statement. +func (s *Selector) Distinct() *Selector { + s.distinct = true + return s +} + +// Limit adds the `LIMIT` clause to the `SELECT` statement. +func (s *Selector) Limit(limit int) *Selector { + s.limit = &limit + return s +} + +// Offset adds the `OFFSET` clause to the `SELECT` statement. +func (s *Selector) Offset(offset int) *Selector { + s.offset = &offset + return s +} + +// Where sets or appends the given predicate to the statement. +func (s *Selector) Where(p *Predicate) *Selector { + if s.not { + p = Not(p) + s.not = false + } + switch { + case s.where == nil: + s.where = p + case s.where != nil && s.or: + s.where = Or(s.where, p) + s.or = false + default: + s.where.merge(p) + } + return s +} + +// FromSelect copies the predicate from a selector. +func (s *Selector) FromSelect(s2 *Selector) *Selector { + s.where = s2.where + return s +} + +// Not sets the next coming predicate with not. 
func (s *Selector) Not() *Selector {
	s.not = true
	return s
}

// Or sets the next coming predicate with OR operator (disjunction).
func (s *Selector) Or() *Selector {
	s.or = true
	return s
}

// Table returns the selected table.
func (s *Selector) Table() *SelectTable {
	return s.from
}

// Join appends a `JOIN` clause to the statement. Joined tables and
// sub-queries without an alias get one assigned automatically.
func (s *Selector) Join(t TableView) *Selector {
	s.joins = append(s.joins, join{
		kind:  "JOIN",
		table: t,
	})
	switch view := t.(type) {
	case *SelectTable:
		// NOTE(review): a plain table always gets the fixed alias "t0",
		// so joining two unaliased tables produces a duplicate alias —
		// presumably intentional for the current tests, but verify;
		// a per-join counter (as used for *Selector below) may be wanted.
		if view.as == "" {
			view.as = "t0"
		}
	case *Selector:
		// Sub-queries are numbered by their join position: t1, t2, ...
		if view.as == "" {
			view.as = "t" + strconv.Itoa(len(s.joins))
		}
	}
	return s
}

// C returns a formatted string for a selected column from this statement.
// It uses this selection's alias when set, and falls back to the FROM
// table's qualifier otherwise.
func (s *Selector) C(column string) string {
	if s.as != "" {
		return fmt.Sprintf("`%s`.`%s`", s.as, column)
	}
	return s.from.C(column)
}

// Columns returns a list of formatted strings for a selected columns from this statement.
func (s *Selector) Columns(columns ...string) []string {
	var names []string
	for _, c := range columns {
		names = append(names, s.C(c))
	}
	return names
}

// On sets the `ON` clause for the `JOIN` operation.
// It applies to the most recently added join; it is a no-op when no
// join was added yet.
func (s *Selector) On(c1, c2 string) *Selector {
	if len(s.joins) > 0 {
		s.joins[len(s.joins)-1].on = fmt.Sprintf("%s = %s", c1, c2)
	}
	return s
}

// As give this selection an alias.
func (s *Selector) As(alias string) *Selector {
	s.as = alias
	return s
}

// Count sets the Select statement to be a `SELECT COUNT(*)`.
// When columns are given, they are counted instead of "*",
// replacing any previous column selection.
func (s *Selector) Count(columns ...string) *Selector {
	column := "*"
	if len(columns) > 0 {
		b := Builder{}
		b.AppendComma(columns...)
		column = b.String()
	}
	s.columns = []string{Count(column)}
	return s
}

// Asc adds the ASC suffix for the given column.
func Asc(column string) string {
	b := Builder{}
	b.Append(column).WriteString(" ASC")
	return b.String()
}

// Desc adds the DESC suffix for the given column.
+func Desc(column string) string { + b := Builder{} + b.Append(column).WriteString(" DESC") + return b.String() +} + +// OrderBy appends the `ORDER BY` clause to the `SELECT` statement. +func (s *Selector) OrderBy(columns ...string) *Selector { + s.order = append(s.order, columns...) + return s +} + +// GroupBy appends the `GROUP BY` clause to the `SELECT` statement. +func (s *Selector) GroupBy(columns ...string) *Selector { + s.group = append(s.group, columns...) + return s +} + +// Having appends a predicate for the `HAVING` clause. +func (s *Selector) Having(p *Predicate) *Selector { + s.having = p + return s +} + +// Query returns query representation of a `SELECT` statement. +func (s *Selector) Query() (string, []interface{}) { + b := &Builder{} + b.WriteString("SELECT ") + if s.distinct { + b.WriteString("DISTINCT ") + } + if len(s.columns) > 0 { + b.AppendComma(s.columns...) + } else { + b.WriteString("*") + } + b.WriteString(" FROM ") + b.Append(s.from.ref()) + if len(s.joins) > 0 { + for _, join := range s.joins { + b.WriteString(fmt.Sprintf(" %s ", join.kind)) + switch view := join.table.(type) { + case *SelectTable: + b.WriteString(view.ref()) + case *Selector: + query, args := view.Query() + b.WriteString(fmt.Sprintf("(%s) AS `%s`", query, view.as)) + b.args = append(b.args, args...) + } + if join.on != "" { + b.WriteString(" ON ") + b.WriteString(join.on) + } + } + } + if s.where != nil { + b.WriteString(" WHERE ") + query, args := s.where.Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + if len(s.group) > 0 { + b.WriteString(" GROUP BY ") + b.AppendComma(s.group...) + } + if s.having != nil { + b.WriteString(" HAVING ") + query, args := s.where.Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + if s.order != nil { + b.WriteString(" ORDER BY ") + b.AppendComma(s.order...) 
+ } + if s.limit != nil { + b.WriteString(" LIMIT ") + b.Arg(*s.limit) + } + if s.offset != nil { + b.WriteString(" OFFSET ") + b.Arg(*s.offset) + } + return b.String(), b.args +} + +// implement the table view interface. +func (*Selector) view() {} + +// WithBuilder is the builder for the `WITH` statement. +type WithBuilder struct { + b Builder + name string + s *Selector +} + +// With returns a new builder for the `WITH` statement. +// +// n := Nodes{With("users_view").As(Select().From(Table("users"))), Select().From(Table("users_view"))} +// return n.Query() +// +func With(name string) *WithBuilder { + return &WithBuilder{name: name} +} + +// Name returns the name of the view. +func (w *WithBuilder) Name() string { return w.name } + +// As sets the view sub query. +func (w *WithBuilder) As(s *Selector) *WithBuilder { + w.s = s + return w +} + +// Query returns query representation of a `WITH` clause. +func (w *WithBuilder) Query() (string, []interface{}) { + w.b.WriteString("WITH " + w.name) + w.b.WriteString(" AS ") + w.b.Nested(func(b *Builder) { + b.Join(w.s) + }) + return w.b.String(), w.b.args +} + +// implement the table view interface. +func (*WithBuilder) view() {} + +// Wrapper wraps a given node with different format. +// Used to prefix/suffix other queries. +type Wrapper struct { + format string + wrapped Node +} + +// Query returns query representation of a wrapped node. 
+func (w *Wrapper) Query() (string, []interface{}) { + query, args := w.wrapped.Query() + return fmt.Sprintf(w.format, query), args +} + +func isFunc(s string) bool { + return strings.Contains(s, "(") && strings.Contains(s, ")") +} + +func isModifier(s string) bool { + for _, m := range []string{"DISTINCT", "ALL", "WITH ROLLUP"} { + if strings.HasPrefix(s, m) { + return true + } + } + return false +} + +func agg(fn, column string) string { + b := Builder{} + b.WriteString(fn) + b.Nested(func(b *Builder) { + b.Append(column) + }) + return b.String() +} diff --git a/dialect/sql/builder_test.go b/dialect/sql/builder_test.go new file mode 100644 index 000000000..cbd803911 --- /dev/null +++ b/dialect/sql/builder_test.go @@ -0,0 +1,382 @@ +package sql + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuilder(t *testing.T) { + tests := []struct { + input Node + wantQuery string + wantArgs []interface{} + }{ + { + input: CreateTable("users"). + Columns( + Column("id").Type("int").Attr("auto_increment"), + Column("name").Type("varchar(255)"), + ). + PrimaryKey("id"), + wantQuery: "CREATE TABLE `users`(`id` int auto_increment, `name` varchar(255), PRIMARY KEY(`id`))", + }, + { + input: CreateTable("users"). + IfNotExists(). + Columns( + Column("id").Type("int").Attr("auto_increment"), + ). + PrimaryKey("id", "name"), + wantQuery: "CREATE TABLE IF NOT EXISTS `users`(`id` int auto_increment, PRIMARY KEY(`id`, `name`))", + }, + { + input: CreateTable("users"). + IfNotExists(). + Columns( + Column("id").Type("int").Attr("auto_increment"), + Column("card_id").Type("int"), + ). + PrimaryKey("id", "name"). + ForeignKeys(ForeignKey().Columns("card_id"). 
+ Reference(Reference().Table("cards").Columns("id")).OnDelete("SET NULL")), + wantQuery: "CREATE TABLE IF NOT EXISTS `users`(`id` int auto_increment, `card_id` int, PRIMARY KEY(`id`, `name`), FOREIGN KEY(`card_id`) REFERENCES `cards`(`id`) ON DELETE SET NULL)", + }, + { + input: AlterTable("users"). + AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). + AddForeignKey(ForeignKey().Columns("group_id"). + Reference(Reference().Table("groups").Columns("id")). + OnDelete("CASCADE"), + ), + wantQuery: "ALTER TABLE `users` ADD `group_id` int UNIQUE, ADD CONSTRAINT FOREIGN KEY(`group_id`) REFERENCES `groups`(`id`) ON DELETE CASCADE", + }, + { + input: AlterTable("users"). + AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). + AddForeignKey(ForeignKey().Columns("group_id"). + Reference(Reference().Table("groups").Columns("id")), + ), + wantQuery: "ALTER TABLE `users` ADD `group_id` int UNIQUE, ADD CONSTRAINT FOREIGN KEY(`group_id`) REFERENCES `groups`(`id`)", + }, + { + input: AlterTable("users"). + AddColumn(Column("age").Type("int")). + AddColumn(Column("name").Type("varchar(255)")), + wantQuery: "ALTER TABLE `users` ADD `age` int, ADD `name` varchar(255)", + }, + { + input: AlterTable("users"). + AddForeignKey(ForeignKey().Columns("group_id"). + Reference(Reference().Table("groups").Columns("id")), + ). + AddForeignKey(ForeignKey().Columns("location_id"). 
+ Reference(Reference().Table("locations").Columns("id")), + ), + wantQuery: "ALTER TABLE `users` ADD CONSTRAINT FOREIGN KEY(`group_id`) REFERENCES `groups`(`id`), ADD CONSTRAINT FOREIGN KEY(`location_id`) REFERENCES `locations`(`id`)", + }, + { + input: Insert("users").Columns("age").Values(1), + wantQuery: "INSERT INTO `users` (`age`) VALUES (?)", + wantArgs: []interface{}{1}, + }, + { + input: Insert("users").Columns("name", "age").Values("a8m", 10), + wantQuery: "INSERT INTO `users` (`name`, `age`) VALUES (?, ?)", + wantArgs: []interface{}{"a8m", 10}, + }, + { + input: Insert("users").Columns("name", "age").Values("a8m", 10).Values("foo", 20), + wantQuery: "INSERT INTO `users` (`name`, `age`) VALUES (?, ?), (?, ?)", + wantArgs: []interface{}{"a8m", 10, "foo", 20}, + }, + { + input: Update("users").Set("name", "foo"), + wantQuery: "UPDATE `users` SET `name` = ?", + wantArgs: []interface{}{"foo"}, + }, + { + input: Update("users").Set("name", "foo").Set("age", 10), + wantQuery: "UPDATE `users` SET `name` = ?, `age` = ?", + wantArgs: []interface{}{"foo", 10}, + }, + { + input: Update("users").Set("name", "foo").Where(EQ("name", "bar")), + wantQuery: "UPDATE `users` SET `name` = ? WHERE `name` = ?", + wantArgs: []interface{}{"foo", "bar"}, + }, + { + input: Update("users").Set("name", "foo").SetNull("spouse_id"), + wantQuery: "UPDATE `users` SET `spouse_id` = NULL, `name` = ?", + wantArgs: []interface{}{"foo"}, + }, + { + input: Update("users").Set("name", "foo"). + Where(EQ("name", "bar")). + Where(EQ("age", 20)), + wantQuery: "UPDATE `users` SET `name` = ? WHERE `name` = ? AND `age` = ?", + wantArgs: []interface{}{"foo", "bar", 20}, + }, + { + input: Update("users"). + Set("name", "foo"). + Set("age", 10). + Where(EQ("name", "bar").Or().EQ("name", "baz")), + wantQuery: "UPDATE `users` SET `name` = ?, `age` = ? WHERE `name` = ? OR `name` = ?", + wantArgs: []interface{}{"foo", 10, "bar", "baz"}, + }, + { + input: Update("users"). + Set("name", "foo"). 
+ Set("age", 10). + Where(P().EQ("name", "foo")), + wantQuery: "UPDATE `users` SET `name` = ?, `age` = ? WHERE `name` = ?", + wantArgs: []interface{}{"foo", 10, "foo"}, + }, + { + input: Update("users"). + Set("name", "foo"). + Where(In("name", "bar", "baz").And().NotIn("age", 1, 2)), + wantQuery: "UPDATE `users` SET `name` = ? WHERE `name` IN (?, ?) AND `age` NOT IN (?, ?)", + wantArgs: []interface{}{"foo", "bar", "baz", 1, 2}, + }, + { + input: Update("users"). + Set("name", "foo"). + Where(HasPrefix("nickname", "a8m").And().Contains("lastname", "mash")), + wantQuery: "UPDATE `users` SET `name` = ? WHERE `nickname` LIKE ? AND `lastname` LIKE ?", + wantArgs: []interface{}{"foo", "a8m%", "%mash%"}, + }, + { + input: Update("users"). + Set("name", "foo"). + Set("age", 10). + Where(P().EQ("name", "foo").And().EQ("age", 20)), + wantQuery: "UPDATE `users` SET `name` = ?, `age` = ? WHERE `name` = ? AND `age` = ?", + wantArgs: []interface{}{"foo", 10, "foo", 20}, + }, + { + input: Delete("users"). + Where(NotNull("parent_id")), + wantQuery: "DELETE FROM `users` WHERE `parent_id` IS NOT NULL", + }, + { + input: Delete("users"). + Where(False().And().False()), + wantQuery: "DELETE FROM `users` WHERE FALSE AND FALSE", + }, + { + input: Delete("users"). + Where(NotNull("parent_id").Or().EQ("parent_id", 10)), + wantQuery: "DELETE FROM `users` WHERE `parent_id` IS NOT NULL OR `parent_id` = ?", + wantArgs: []interface{}{10}, + }, + { + input: Delete("users"). + Where( + Or( + EQ("name", "foo").And().EQ("age", 10), + EQ("name", "bar").And().EQ("age", 20), + And( + EQ("name", "qux"), + EQ("age", 1).Or().EQ("age", 2), + ), + ), + ), + wantQuery: "DELETE FROM `users` WHERE (`name` = ? AND `age` = ?) OR (`name` = ? AND `age` = ?) OR ((`name` = ?) AND (`age` = ? 
OR `age` = ?))", + wantArgs: []interface{}{"foo", 10, "bar", 20, "qux", 1, 2}, + }, + { + input: Select().From(Table("users")), + wantQuery: "SELECT * FROM `users`", + }, + { + input: Select().From(Table("users").As("u")), + wantQuery: "SELECT * FROM `users` AS `u`", + }, + { + input: func() Node { + t1 := Table("users").As("u") + t2 := Table("groups").As("g") + return Select(t1.C("id"), t2.C("name")).From(t1).Join(t2) + }(), + wantQuery: "SELECT `u`.`id`, `g`.`name` FROM `users` AS `u` JOIN `groups` AS `g`", + }, + { + input: func() Node { + t1 := Table("users").As("u") + t2 := Table("groups").As("g") + return Select(t1.C("id"), t2.C("name")). + From(t1). + Join(t2). + On(t1.C("id"), t2.C("user_id")) + }(), + wantQuery: "SELECT `u`.`id`, `g`.`name` FROM `users` AS `u` JOIN `groups` AS `g` ON `u`.`id` = `g`.`user_id`", + }, + { + input: func() Node { + t1 := Table("users").As("u") + t2 := Table("groups").As("g") + return Select(t1.C("id"), t2.C("name")). + From(t1). + Join(t2). + On(t1.C("id"), t2.C("user_id")). + Where(EQ(t1.C("name"), "bar").And().NotNull(t2.C("name"))) + }(), + wantQuery: "SELECT `u`.`id`, `g`.`name` FROM `users` AS `u` JOIN `groups` AS `g` ON `u`.`id` = `g`.`user_id` WHERE `u`.`name` = ? AND `g`.`name` IS NOT NULL", + wantArgs: []interface{}{"bar"}, + }, + { + input: func() Node { + t1 := Table("users").As("u") + return Select(t1.Columns("name", "age")...).From(t1) + }(), + wantQuery: "SELECT `u`.`name`, `u`.`age` FROM `users` AS `u`", + }, + { + input: func() Node { + t1 := Table("users").As("u") + t2 := Select().From(Table("groups")).Where(EQ("user_id", 10)).As("g") + return Select(t1.C("id"), t2.C("name")). + From(t1). + Join(t2). + On(t1.C("id"), t2.C("user_id")) + }(), + wantQuery: "SELECT `u`.`id`, `g`.`name` FROM `users` AS `u` JOIN (SELECT * FROM `groups` WHERE `user_id` = ?) 
AS `g` ON `u`.`id` = `g`.`user_id`", + wantArgs: []interface{}{10}, + }, + { + input: func() Node { + selector := Select().Where(EQ("name", "foo").Or().EQ("name", "bar")) + return Delete("users").FromSelect(selector) + }(), + wantQuery: "DELETE FROM `users` WHERE `name` = ? OR `name` = ?", + wantArgs: []interface{}{"foo", "bar"}, + }, + { + input: func() Node { + selector := Select().From(Table("users")).As("t") + return selector.Select(selector.C("name")) + }(), + wantQuery: "SELECT `t`.`name` FROM `users`", + }, + { + input: func() Node { + selector := Select().From(Table("groups")).Where(EQ("name", "foo")) + return Delete("users").FromSelect(selector) + }(), + wantQuery: "DELETE FROM `groups` WHERE `name` = ?", + wantArgs: []interface{}{"foo"}, + }, + { + input: func() Node { + selector := Select() + return Delete("users").FromSelect(selector) + }(), + wantQuery: "DELETE FROM `users`", + }, + { + input: Select().From(Table("users")).Where(Not(EQ("name", "foo").And().EQ("age", "bar"))), + wantQuery: "SELECT * FROM `users` WHERE NOT (`name` = ? AND `age` = ?)", + wantArgs: []interface{}{"foo", "bar"}, + }, + { + input: func() Node { + t1 := Table("users") + return Select(). + From(t1). + Where(In(t1.C("id"), Select("owner_id").From(Table("pets")).Where(EQ("name", "pedro")))) + }(), + wantQuery: "SELECT * FROM `users` WHERE `users`.`id` IN (SELECT `owner_id` FROM `pets` WHERE `name` = ?)", + wantArgs: []interface{}{"pedro"}, + }, + { + input: func() Node { + t1 := Table("users") + return Select(). + From(t1). 
+ Where(Not(In(t1.C("id"), Select("owner_id").From(Table("pets")).Where(EQ("name", "pedro"))))) + }(), + wantQuery: "SELECT * FROM `users` WHERE NOT (`users`.`id` IN (SELECT `owner_id` FROM `pets` WHERE `name` = ?))", + wantArgs: []interface{}{"pedro"}, + }, + { + input: Select().Count().From(Table("users")), + wantQuery: "SELECT COUNT(*) FROM `users`", + }, + { + input: Select().Count(Distinct("id")).From(Table("users")), + wantQuery: "SELECT COUNT(DISTINCT `id`) FROM `users`", + }, + { + input: func() Node { + t1 := Table("users") + t2 := Select().From(Table("groups")) + t3 := Select().Count().From(t1).Join(t1).On(t2.C("id"), t1.C("blocked_id")) + return t3.Count(Distinct(t3.Columns("id", "name")...)) + }(), + wantQuery: "SELECT COUNT(DISTINCT `t0`.`id`, `t0`.`name`) FROM `users` AS `t0` JOIN `users` AS `t0` ON `groups`.`id` = `t0`.`blocked_id`", + }, + { + input: Select(Sum("age"), Min("age")).From(Table("users")), + wantQuery: "SELECT SUM(`age`), MIN(`age`) FROM `users`", + }, + { + input: func() Node { + t1 := Table("users").As("u") + return Select(As(Max(t1.C("age")), "max_age")).From(t1) + }(), + wantQuery: "SELECT MAX(`u`.`age`) AS `max_age` FROM `users` AS `u`", + }, + { + input: Select("name", Count("*")). + From(Table("users")). + GroupBy("name"), + wantQuery: "SELECT `name`, COUNT(*) FROM `users` GROUP BY `name`", + }, + { + input: Select("name", Count("*")). + From(Table("users")). + GroupBy("name"). + OrderBy("name"), + wantQuery: "SELECT `name`, COUNT(*) FROM `users` GROUP BY `name` ORDER BY `name`", + }, + { + input: Select("name", "age", Count("*")). + From(Table("users")). + GroupBy("name", "age"). 
+ OrderBy(Desc("name"), "age"), + wantQuery: "SELECT `name`, `age`, COUNT(*) FROM `users` GROUP BY `name`, `age` ORDER BY `name` DESC, `age`", + }, + { + input: Select("*").From(Table("users")).Limit(1), + wantQuery: "SELECT * FROM `users` LIMIT ?", + wantArgs: []interface{}{1}, + }, + { + input: Select("age").Distinct().From(Table("users")), + wantQuery: "SELECT DISTINCT `age` FROM `users`", + }, + { + input: Select("age", "name").From(Table("users")).Distinct().OrderBy("name"), + wantQuery: "SELECT DISTINCT `age`, `name` FROM `users` ORDER BY `name`", + }, + { + input: Select("age").From(Table("users")).Where(EQ("name", "foo")).Or().Where(EQ("name", "bar")), + wantQuery: "SELECT `age` FROM `users` WHERE (`name` = ?) OR (`name` = ?)", + wantArgs: []interface{}{"foo", "bar"}, + }, + { + input: Nodes{With("users_view").As(Select().From(Table("users"))), Select().From(Table("users_view"))}, + wantQuery: "WITH users_view AS (SELECT * FROM `users`) SELECT * FROM `users_view`", + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + query, args := tt.input.Query() + require.Equal(t, tt.wantQuery, query) + require.Equal(t, tt.wantArgs, args) + }) + } +} diff --git a/dialect/sql/driver.go b/dialect/sql/driver.go new file mode 100644 index 000000000..a7c44c242 --- /dev/null +++ b/dialect/sql/driver.go @@ -0,0 +1,150 @@ +package sql + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "time" + + "fbc/ent/dialect" +) + +// Driver is a dialect.Driver implementation for SQL based databases. +type Driver struct { + conn + dialect string +} + +// Open wraps the database/sql.Open method and returns a dialect.Driver that implements the an ent/dialect.Driver interface. +func Open(driver, source string) (*Driver, error) { + db, err := sql.Open(driver, source) + if err != nil { + return nil, err + } + return &Driver{conn{db}, driver}, nil +} + +// OpenDB wraps the given database/sql.DB method with a Driver. 
+func OpenDB(driver string, db *sql.DB) *Driver { + return &Driver{conn{db}, driver} +} + +// Dialect implements the dialect.Dialect method. +func (d Driver) Dialect() string { return d.dialect } + +// Tx starts and returns a transaction. +func (d *Driver) Tx(ctx context.Context) (dialect.Tx, error) { + tx, err := d.ExecQuerier.(*sql.DB).BeginTx(ctx, &sql.TxOptions{}) + if err != nil { + return nil, err + } + return &Tx{conn{tx}}, nil +} + +// Close closes the underlying connection. +func (d *Driver) Close() error { return d.ExecQuerier.(*sql.DB).Close() } + +// Tx wraps the sql.Tx for implementing the dialect.Tx interface. +type Tx struct { + conn +} + +// Commit commits the transaction. +func (t *Tx) Commit() error { return t.ExecQuerier.(*sql.Tx).Commit() } + +// Rollback rollback the transaction. +func (t *Tx) Rollback() error { return t.ExecQuerier.(*sql.Tx).Rollback() } + +// ExecQuerier wraps the standard Exec and Query methods. +type ExecQuerier interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +// shared connection ExecQuerier between Gremlin and Tx. +type conn struct { + ExecQuerier +} + +// Exec implements the dialect.Exec method. +func (c *conn) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + vr, ok := v.(*sql.Result) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect *sql.Result", v) + } + argv, ok := args.([]interface{}) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect []interface{} for args", v) + } + res, err := c.ExecContext(ctx, query, argv...) + if err != nil { + return err + } + *vr = res + return nil +} + +// Exec implements the dialect.Query method. +func (c *conn) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + vr, ok := v.(*Rows) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. 
expect *sql.Rows", v) + } + argv, ok := args.([]interface{}) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect []interface{} for args", args) + } + rows, err := c.QueryContext(ctx, query, argv...) + if err != nil { + return err + } + *vr = Rows{rows} + return nil +} + +var _ dialect.Driver = (*Driver)(nil) + +type ( + // Rows wraps the sql.Rows to avoid locks copy. + Rows struct{ *sql.Rows } + // Result is an alias to sql.Result. + Result = sql.Result + // NullBool is an alias to sql.NullBool. + NullBool = sql.NullBool + // NullInt64 is an alias to sql.NullInt64. + NullInt64 = sql.NullInt64 + // NullString is an alias to sql.NullString. + NullString = sql.NullString + // NullFloat64 is an alias to sql.NullFloat64. + NullFloat64 = sql.NullFloat64 +) + +// Note: +// NullTime is a modified copy of database/sql.NullTime from Go 1.13, +// It should be replaced with standard library code when Go 1.13 is released. + +// NullTime represents a time.Time that may be null. +// NullTime implements the Scanner interface so +// it can be used as a scan destination, similar to NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (n *NullTime) Scan(v interface{}) error { + if v, ok := v.(time.Time); ok { + n.Time = v + n.Valid = true + } + return nil +} + +// Value implements the driver Valuer interface. +func (n NullTime) Value() (driver.Value, error) { + if !n.Valid { + return nil, nil + } + return n.Time, nil +} diff --git a/dialect/sql/scan.go b/dialect/sql/scan.go new file mode 100644 index 000000000..787309b3d --- /dev/null +++ b/dialect/sql/scan.go @@ -0,0 +1,123 @@ +package sql + +import ( + "fmt" + "reflect" + "strings" +) + +// ColumnScanner is the interface that wraps the +// three sql.Rows methods used for scanning. 
+type ColumnScanner interface { + Next() bool + Scan(...interface{}) error + Columns() ([]string, error) +} + +// ScanSlice scans the given ColumnScanner (basically, sql.Rows or sql.Rows) into the given slice. +func ScanSlice(rows ColumnScanner, v interface{}) error { + columns, err := rows.Columns() + if err != nil { + return fmt.Errorf("sql/scan: failed getting column names: %v", err) + } + rv := reflect.Indirect(reflect.ValueOf(v)) + if k := rv.Kind(); k != reflect.Slice { + return fmt.Errorf("sql/scan: invalid type %s. expected slice as an argument", k) + } + var ( + scan *rowScan + typ = rv.Type().Elem() + ) + switch k := typ.Kind(); { + case k == reflect.String || k >= reflect.Bool && k <= reflect.Float64: + scan = &rowScan{ + columns: []reflect.Type{typ}, + value: func(v ...interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(v[0])) + }, + } + case k == reflect.Ptr: + typ = typ.Elem() + if scan, err = scanStruct(typ, columns); err != nil { + return err + } + wrap := scan.value + scan.value = func(vs ...interface{}) reflect.Value { + v := wrap(vs...) + pt := reflect.PtrTo(v.Type()) + pv := reflect.New(pt.Elem()) + pv.Elem().Set(v) + return pv + } + case k == reflect.Struct: + if scan, err = scanStruct(typ, columns); err != nil { + return err + } + default: + return fmt.Errorf("sql/scan: unsupported type ([]%s)", k) + } + if n, m := len(columns), len(scan.columns); n > m { + return fmt.Errorf("sql/scan: columns do not match (%d > %d)", n, m) + } + for rows.Next() { + values := scan.values() + if err := rows.Scan(values...); err != nil { + return fmt.Errorf("sql/scan: failed scanning rows: %v", err) + } + vv := reflect.Append(rv, scan.value(values...)) + rv.Set(vv) + } + return nil +} + +// rowScan is the configuration for scanning one sql.Row. +type rowScan struct { + // column types of a row. + columns []reflect.Type + // value functions that converts the row columns (result) to a reflect.Value. 
+ value func(v ...interface{}) reflect.Value +} + +// values returns a []interface{} from the configured column types. +func (r *rowScan) values() []interface{} { + values := make([]interface{}, len(r.columns)) + for i := range r.columns { + values[i] = reflect.New(r.columns[i]).Interface() + } + return values +} + +// scanStruct returns the a configuration for scanning an sql.Row into a struct. +func scanStruct(typ reflect.Type, columns []string) (*rowScan, error) { + var ( + scan = &rowScan{} + names = make(map[string]int) + idx = make([]int, 0, typ.NumField()) + ) + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + name := strings.ToLower(f.Name) + if tag, ok := f.Tag.Lookup("json"); ok { + name = strings.Split(tag, ",")[0] + } + names[name] = i + } + for _, c := range columns { + // normalize columns if necessary, for example: COUNT(*) => count. + name := strings.ToLower(strings.Split(c, "(")[0]) + i, ok := names[name] + if !ok { + return nil, fmt.Errorf("sql/scan: missing struct field for column: %s (%s)", c, name) + } + idx = append(idx, i) + scan.columns = append(scan.columns, typ.Field(i).Type) + } + scan.value = func(vs ...interface{}) reflect.Value { + st := reflect.New(typ).Elem() + for i, v := range vs { + st.Field(idx[i]).Set(reflect.Indirect(reflect.ValueOf(v))) + } + return st + } + return scan, nil +} diff --git a/dialect/sql/scan_test.go b/dialect/sql/scan_test.go new file mode 100644 index 000000000..a7d689cbe --- /dev/null +++ b/dialect/sql/scan_test.go @@ -0,0 +1,91 @@ +package sql + +import ( + "database/sql" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestScanSlice(t *testing.T) { + rows := &mockRows{ + columns: []string{"name"}, + values: [][]interface{}{{"foo"}, {"bar"}}, + } + var v0 []string + require.NoError(t, ScanSlice(rows, &v0)) + require.Equal(t, []string{"foo", "bar"}, v0) + + rows = &mockRows{ + columns: []string{"age"}, + values: [][]interface{}{{1}, {2}}, + } + var v1 []int + 
require.NoError(t, ScanSlice(rows, &v1)) + require.Equal(t, []int{1, 2}, v1) + + rows = &mockRows{ + columns: []string{"name", "COUNT(*)"}, + values: [][]interface{}{{"foo", 1}, {"bar", 2}}, + } + var v2 []struct { + Name string + Count int + } + require.NoError(t, ScanSlice(rows, &v2)) + require.Equal(t, "foo", v2[0].Name) + require.Equal(t, "bar", v2[1].Name) + require.Equal(t, 1, v2[0].Count) + require.Equal(t, 2, v2[1].Count) + + rows = &mockRows{ + columns: []string{"nick_name", "COUNT(*)"}, + values: [][]interface{}{{"foo", 1}, {"bar", 2}}, + } + var v3 []struct { + Count int + Name string `json:"nick_name"` + } + require.NoError(t, ScanSlice(rows, &v3)) + require.Equal(t, "foo", v3[0].Name) + require.Equal(t, "bar", v3[1].Name) + require.Equal(t, 1, v3[0].Count) + require.Equal(t, 2, v3[1].Count) + + rows = &mockRows{ + columns: []string{"nick_name", "COUNT(*)"}, + values: [][]interface{}{{"foo", 1}, {"bar", 2}}, + } + var v4 []*struct { + Count int + Name string `json:"nick_name"` + Ignored string `json:"string"` + } + require.NoError(t, ScanSlice(rows, &v4)) + require.Equal(t, "foo", v4[0].Name) + require.Equal(t, "bar", v4[1].Name) + require.Equal(t, 1, v4[0].Count) + require.Equal(t, 2, v4[1].Count) +} + +type mockRows struct { + columns []string + values [][]interface{} +} + +func (m mockRows) Columns() ([]string, error) { return m.columns, nil } + +func (m mockRows) Next() bool { return len(m.values) > 0 } + +func (m *mockRows) Scan(vs ...interface{}) error { + if len(m.values) == 0 { + return sql.ErrNoRows + } + row := m.values[0] + m.values = m.values[1:] + for i := range vs { + reflect.Indirect(reflect.ValueOf(vs[i])).Set(reflect.ValueOf(row[i])) + } + return nil +} diff --git a/dialect/sql/schema/mysql.go b/dialect/sql/schema/mysql.go new file mode 100644 index 000000000..584ecf442 --- /dev/null +++ b/dialect/sql/schema/mysql.go @@ -0,0 +1,115 @@ +package schema + +import ( + "context" + "crypto/md5" + "fmt" + + "fbc/ent/dialect" + 
"fbc/ent/dialect/sql" +) + +// MySQL is a mysql migration driver. +type MySQL struct { + dialect.Driver +} + +// Create creates all tables resources in the database. +func (d *MySQL) Create(ctx context.Context, tables ...*Table) error { + tx, err := d.Tx(ctx) + if err != nil { + return err + } + for _, t := range tables { + exist, err := d.tableExist(ctx, tx, t.Name) + if err != nil { + return rollback(tx, err) + } + if exist { + continue + } + query, args := t.DSL().Query() + if err := tx.Exec(ctx, query, args, new(sql.Result)); err != nil { + return rollback(tx, fmt.Errorf("sql/mysql: create table %q: %v", t.Name, err)) + } + } + // create foreign keys after table was created, because circular foreign-key constraints are possible. + for _, t := range tables { + if len(t.ForeignKeys) == 0 { + continue + } + fks := make([]*ForeignKey, 0, len(t.ForeignKeys)) + for _, fk := range t.ForeignKeys { + fk.Symbol = symbol(fk.Symbol) + exist, err := d.fkExist(ctx, tx, fk.Symbol) + if err != nil { + return rollback(tx, err) + } + if !exist { + fks = append(fks, fk) + } + } + if len(fks) == 0 { + continue + } + b := sql.AlterTable(t.Name) + for _, fk := range fks { + b.AddForeignKey(fk.DSL()) + } + query, args := b.Query() + if err := tx.Exec(ctx, query, args, new(sql.Result)); err != nil { + return rollback(tx, fmt.Errorf("sql/mysql: create foreign keys for %q: %v", t.Name, err)) + } + } + return tx.Commit() +} + +func (d *MySQL) tableExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) { + return d.exist( + ctx, + tx, + "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = (SELECT DATABASE()) AND TABLE_NAME = ?", + name, + ) +} + +func (d *MySQL) fkExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) { + return d.exist( + ctx, + tx, + `SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_SCHEMA=(SELECT DATABASE()) AND CONSTRAINT_TYPE="FOREIGN KEY" AND CONSTRAINT_NAME = ?`, + name, + ) +} + +func (d *MySQL) 
exist(ctx context.Context, tx dialect.Tx, query string, args ...interface{}) (bool, error) { + rows := &sql.Rows{} + if err := tx.Query(ctx, query, args, rows); err != nil { + return false, fmt.Errorf("dialect/mysql: reading schema information %v", err) + } + defer rows.Close() + if !rows.Next() { + return false, fmt.Errorf("dialect/mysql: no rows returned") + } + var n int + if err := rows.Scan(&n); err != nil { + return false, fmt.Errorf("dialect/mysql: scanning count") + } + return n > 0, nil +} + +// symbol makes sure the symbol length is not longer than the maxlength in MySQL standard (64). +func symbol(name string) string { + if len(name) <= 64 { + return name + } + return fmt.Sprintf("%s_%x", name[:31], md5.Sum([]byte(name))) +} + +// rollback calls to tx.Rollback and wraps the given error with the rollback error if occurred. +func rollback(tx dialect.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%s: %v", err.Error(), rerr) + } + return err +} diff --git a/dialect/sql/schema/schema.go b/dialect/sql/schema/schema.go new file mode 100644 index 000000000..74a8689a6 --- /dev/null +++ b/dialect/sql/schema/schema.go @@ -0,0 +1,266 @@ +package schema + +import ( + "fmt" + "strings" + + "fbc/ent/dialect/sql" + "fbc/ent/field" +) + +// Table schema definition for SQL dialects. +type Table struct { + Name string + Columns []*Column + Indexes []*Index + PrimaryKey []*Column + ForeignKeys []*ForeignKey +} + +// NewTable returns a new table with the given name. +func NewTable(name string) *Table { return &Table{Name: name} } + +// AddPrimary adds a new primary key to the table. +func (t *Table) AddPrimary(c *Column) *Table { + t.Columns = append(t.Columns, c) + t.PrimaryKey = append(t.PrimaryKey, c) + return t +} + +// AddForeignKey adds a foreign key to the table. 
+func (t *Table) AddForeignKey(fk *ForeignKey) *Table { + t.ForeignKeys = append(t.ForeignKeys, fk) + return t +} + +// DSL returns the default DSL query for table creation. +func (t *Table) DSL() *sql.TableBuilder { + b := sql.CreateTable(t.Name).IfNotExists() + for _, c := range t.Columns { + b.Column(c.DSL()) + } + for _, pk := range t.PrimaryKey { + b.PrimaryKey(pk.Name) + } + return b +} + +// SQLite returns the SQLite query for table creation. +func (t *Table) SQLite() *sql.TableBuilder { + b := sql.CreateTable(t.Name) + for _, c := range t.Columns { + b.Column(c.SQLite()) + } + // Unlike in MySQL, we're not able to add foreign-key constraints to table + // after it was created, and adding them to the `CREATE TABLE` statement is + // not always valid (because circular foreign-keys situation is possible). + // We stay consistent by not using constraints at all, and just defining the + // foreign keys in the `CREATE TABLE` statement. + for _, fk := range t.ForeignKeys { + b.ForeignKeys(fk.DSL()) + } + // if it's an ID based primary key, we add the `PRIMARY KEY` + // clause to the column declaration. + if len(t.PrimaryKey) == 1 { + return b + } + for _, pk := range t.PrimaryKey { + b.PrimaryKey(pk.Name) + } + return b +} + +// Column schema definition for SQL dialects. +type Column struct { + Name string // column name. + Type field.Type // column type. + Attr string // extra attributes. + Default string // default value. + Nullable *bool // null or not null attribute. + Size int // max size parameter for string, blob, etc. + Key string // key definition (PRI, UNI or MUL). + Unique bool // column with unique constraint. + Increment bool // auto increment attribute. +} + +// UniqueKey returns boolean indicates if this column is a unique key. +// Used by the migration tool when parsing the `DESCRIBE TABLE` output Go objects. 
func (c *Column) UniqueKey() bool { return c.Key == "UNI" }

// PrimaryKey reports whether this column is one of the primary key columns.
// Used by the migration tool when parsing the `DESCRIBE TABLE` output Go objects.
func (c *Column) PrimaryKey() bool { return c.Key == "PRI" }

// DSL returns the default DSL query for table creation.
func (c *Column) DSL() *sql.ColumnBuilder {
	b := sql.Column(c.Name).Type(c.MySQLType()).Attr(c.Attr)
	c.unique(b)
	if c.Increment {
		b.Attr("AUTO_INCREMENT")
	}
	c.nullable(b)
	return b
}

// SQLite returns a SQLite DSL node for this column.
func (c *Column) SQLite() *sql.ColumnBuilder {
	b := sql.Column(c.Name).Type(c.SQLiteType()).Attr(c.Attr)
	c.unique(b)
	if c.Increment {
		// in SQLite, the auto-increment attribute is attached to the
		// primary-key declaration on the column itself.
		b.Attr("PRIMARY KEY AUTOINCREMENT")
	}
	c.nullable(b)
	return b
}

// MySQLType returns the MySQL string type for this column.
func (c *Column) MySQLType() (t string) {
	switch c.Type {
	case field.TypeBool:
		t = "boolean"
	case field.TypeInt8:
		t = "tinyint"
	case field.TypeUint8:
		t = "tinyint unsigned"
	case field.TypeInt64:
		t = "bigint"
	case field.TypeUint64:
		t = "bigint unsigned"
	case field.TypeInt, field.TypeInt16, field.TypeInt32:
		t = "int"
	case field.TypeUint, field.TypeUint16, field.TypeUint32:
		t = "int unsigned"
	case field.TypeString:
		size := c.Size
		if size == 0 {
			// default size for string columns.
			size = 255
		}
		// strings that fit in 64K-1 bytes become varchar; larger ones become longtext.
		if size < 1<<16 {
			t = fmt.Sprintf("varchar(%d)", size)
		} else {
			t = "longtext"
		}
	case field.TypeFloat32, field.TypeFloat64:
		t = "double"
	case field.TypeTime:
		t = "timestamp"
		// in MySQL, timestamp columns are `NOT NULL` by default, and assigning NULL
		// assigns the current_timestamp(). We avoid this if not set otherwise.
		if c.Nullable == nil {
			nullable := true
			c.Nullable = &nullable
		}
	default:
		panic("unsupported type " + c.Type.String())
	}
	return t
}

// SQLiteType returns the SQLite string type for this column.
+func (c *Column) SQLiteType() (t string) { + switch c.Type { + case field.TypeBool: + t = "bool" + case field.TypeInt8, field.TypeUint8, field.TypeInt, field.TypeInt16, field.TypeInt32, field.TypeUint, field.TypeUint16, field.TypeUint32: + t = "integer" + case field.TypeInt64, field.TypeUint64: + t = "bigint" + case field.TypeString: + size := c.Size + if size == 0 { + size = 255 + } + // sqlite has no size limit on varchar. + t = fmt.Sprintf("varchar(%d)", size) + case field.TypeFloat32, field.TypeFloat64: + t = "real" + case field.TypeTime: + t = "datetime" + default: + panic("unsupported type " + c.Type.String()) + } + return t +} + +// unique adds the `UNIQUE` attribute if the column is a unique type. +// it is exist in a different function to share the common declaration +// between the two dialects. +func (c *Column) unique(b *sql.ColumnBuilder) { + if c.Unique { + b.Attr("UNIQUE") + } +} + +// nullable adds the `NULL`/`NOT NULL` attribute to the column. it is exist in +// a different function to share the common declaration between the two dialects. +func (c *Column) nullable(b *sql.ColumnBuilder) { + if c.Nullable != nil { + attr := "NULL" + if !*c.Nullable { + attr = "NOT " + attr + } + b.Attr(attr) + } +} + +// ForeignKey definition for creation. +type ForeignKey struct { + Symbol string // foreign-key name. Generated if empty. + Columns []*Column // table column + RefTable *Table // referenced table. + RefColumns []*Column // referenced columns. + OnUpdate ReferenceOption // action on update. + OnDelete ReferenceOption // action on delete. +} + +// DSL returns a default DSL query for a foreign-key. +func (fk ForeignKey) DSL() *sql.ForeignKeyBuilder { + cols := make([]string, len(fk.Columns)) + refs := make([]string, len(fk.RefColumns)) + for i, c := range fk.Columns { + cols[i] = c.Name + } + for i, c := range fk.RefColumns { + refs[i] = c.Name + } + dsl := sql.ForeignKey().Symbol(fk.Symbol). + Columns(cols...). 
+ Reference(sql.Reference().Table(fk.RefTable.Name).Columns(refs...)) + if action := string(fk.OnDelete); action != "" { + dsl.OnDelete(action) + } + if action := string(fk.OnUpdate); action != "" { + dsl.OnUpdate(action) + } + return dsl +} + +// ReferenceOption for constraint actions. +type ReferenceOption string + +// Reference options. +const ( + NoAction ReferenceOption = "NO ACTION" + Restrict ReferenceOption = "RESTRICT" + Cascade ReferenceOption = "CASCADE" + SetNull ReferenceOption = "SET NULL" + SetDefault ReferenceOption = "SET DEFAULT" +) + +// ConstName returns the constant name of a reference option. It's used by entc for printing the constant name in templates. +func (r ReferenceOption) ConstName() string { + if r == NoAction { + return "" + } + return strings.ReplaceAll(strings.Title(strings.ToLower(string(r))), " ", "") +} + +// Index definition for table index. +type Index struct { + Key string // key name. + Column string // column name. +} + +// Primary indicates if this index is a primary key. +// Used by the migration tool when parsing the `DESCRIBE TABLE` output Go objects. +func (i *Index) Primary() bool { return i.Key == "PRIMARY" } diff --git a/dialect/sql/schema/sqlite.go b/dialect/sql/schema/sqlite.go new file mode 100644 index 000000000..29753b974 --- /dev/null +++ b/dialect/sql/schema/sqlite.go @@ -0,0 +1,74 @@ +package schema + +import ( + "context" + "fmt" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" +) + +// SQLite is an SQLite migration driver. +type SQLite struct { + dialect.Driver +} + +// Create creates all tables resources in the database. 
+func (d *SQLite) Create(ctx context.Context, tables ...*Table) error { + tx, err := d.Tx(ctx) + if err != nil { + return err + } + on, err := d.fkEnabled(ctx, tx) + if err != nil { + return fmt.Errorf("sql/sqlite: check foreign_keys pragma: %v", err) + } + if !on { + // foreign_keys pragma is off, either enable it by execute "PRAGMA foreign_keys=ON" + // or add the following parameter in the connection string "_fk=1". + return fmt.Errorf("sql/sqlite: foreign_keys pragma is off: missing %q is the connection string", "_fk=1") + } + for _, t := range tables { + exist, err := d.tableExist(ctx, tx, t.Name) + if err != nil { + return rollback(tx, err) + } + if exist { + continue + } + query, args := t.SQLite().Query() + if err := tx.Exec(ctx, query, args, new(sql.Result)); err != nil { + err = fmt.Errorf("sql/sqlite: create table %q: %v", t.Name, err) + return rollback(tx, err) + } + } + return tx.Commit() +} + +func (d *SQLite) tableExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) { + query, args := sql.Select().Count(). + From(sql.Table("sqlite_master")). + Where(sql.EQ("type", "table").And().EQ("name", name)). + Query() + return d.exist(ctx, tx, query, args...) 
+} + +func (d *SQLite) fkEnabled(ctx context.Context, tx dialect.Tx) (bool, error) { + return d.exist(ctx, tx, "PRAGMA foreign_keys") +} + +func (d *SQLite) exist(ctx context.Context, tx dialect.Tx, query string, args ...interface{}) (bool, error) { + rows := &sql.Rows{} + if err := tx.Query(ctx, query, args, rows); err != nil { + return false, fmt.Errorf("dialect/sqlite: reading schema information %v", err) + } + defer rows.Close() + if !rows.Next() { + return false, fmt.Errorf("dialect/sqlite: no rows returned") + } + var n int + if err := rows.Scan(&n); err != nil { + return false, fmt.Errorf("dialect/sqlite: scanning count") + } + return n > 0, nil +} diff --git a/edge/edge.go b/edge/edge.go new file mode 100644 index 000000000..c68fa326f --- /dev/null +++ b/edge/edge.go @@ -0,0 +1,150 @@ +package edge + +import ( + "reflect" + + "fbc/ent/field" +) + +// Edge represents an edge in the graph. +type Edge struct { + typ string + tag string + ref string + name string + unique bool + required bool + inverse bool + parent *Edge + fields []*field.Field +} + +// To defines an association edge between two vertices. +func To(name string, t interface{}) *assocBuilder { + return &assocBuilder{&Edge{name: name, typ: typ(t)}} +} + +// From represents a reversed-edge between two vertices that has a back-reference to its source edge. +func From(name string, t interface{}) *inverseBuilder { + return &inverseBuilder{&Edge{name: name, typ: typ(t), inverse: true}} +} + +// Type returns the type of the edge. +func (e Edge) Type() string { return e.typ } + +// IsUnique returns is the edge is unique. +func (e Edge) IsUnique() bool { return e.unique } + +// AssocName returns the edge name. +func (e Edge) Name() string { return e.name } + +// IsAssoc returns is the edge is assoc type. +func (e Edge) IsAssoc() bool { return !e.inverse } + +// IsInverse returns is the edge is inverse type. 
func (e Edge) IsInverse() bool { return e.inverse }

// Assoc returns the assoc edge of the inverse edge.
func (e Edge) Assoc() *Edge { return e.parent }

// RefName returns the reference edge name.
func (e Edge) RefName() string { return e.ref }

// GetFields returns the edge fields.
func (e Edge) GetFields() []*field.Field { return e.fields }

// Tag returns the struct tag of the edge.
func (e Edge) Tag() string { return e.tag }

// IsRequired reports whether this edge is required on entity creation.
// Edges are optional by default.
func (e Edge) IsRequired() bool { return e.required }

// typ returns the type name of the first input parameter of the given
// function (e.g. User.Type), or an empty string if it has none.
func typ(t interface{}) string {
	if rt := reflect.TypeOf(t); rt.NumIn() > 0 {
		return rt.In(0).Name()
	}
	return ""
}

// assocBuilder is the builder for assoc edges.
type assocBuilder struct {
	*Edge
}

// Fields sets the fields of the edge.
func (b *assocBuilder) Fields(f ...*field.Field) *assocBuilder {
	b.fields = f
	return b
}

// Unique sets the edge type to be unique. Basically, it limits the ent to be one of the two:
// one2one or one2many. one2one is applied if the inverse-edge is also unique.
func (b *assocBuilder) Unique() *assocBuilder {
	b.unique = true
	return b
}

// Required indicates that this edge is a required field on creation.
// Unlike fields, edges are optional by default.
func (b *assocBuilder) Required() *assocBuilder {
	b.required = true
	return b
}

// StructTag sets the struct tag of the assoc edge.
func (b *assocBuilder) StructTag(s string) *assocBuilder {
	b.tag = s
	return b
}

// From creates an inverse-edge with the same type, with a back-reference to this assoc edge.
func (b *assocBuilder) From(name string) *inverseBuilder {
	return &inverseBuilder{&Edge{name: name, typ: b.typ, inverse: true, parent: b.Edge}}
}

// Comment used to put annotations on the schema.
func (b *assocBuilder) Comment(string) *assocBuilder {
	return b
}

// inverseBuilder is the builder for inverse edges.
+type inverseBuilder struct { + *Edge +} + +// Ref sets the referenced-edge of this inverse edge. +func (b *inverseBuilder) Ref(ref string) *inverseBuilder { + b.ref = ref + return b +} + +// Fields sets the fields of the edge. +func (b *inverseBuilder) Fields(f ...*field.Field) *inverseBuilder { + b.fields = f + return b +} + +// Unique sets the edge type to be unique. Basically, it's limited the ent to be one of the two: +// one2one or one2many. one2one applied if the inverse-edge is also unique. +func (b *inverseBuilder) Unique() *inverseBuilder { + b.unique = true + return b +} + +// Required indicates that this edge is a required field on creation. +// Unlike fields, edges are optional by default. +func (b *inverseBuilder) Required() *inverseBuilder { + b.required = true + return b +} + +// StructTag sets the struct tag of the inverse edge. +func (b *inverseBuilder) StructTag(s string) *inverseBuilder { + b.tag = s + return b +} + +// Comment used to put annotations on the schema. +func (b *inverseBuilder) Comment(string) *inverseBuilder { + return b +} diff --git a/edge/edge_test.go b/edge/edge_test.go new file mode 100644 index 000000000..dee56bf2b --- /dev/null +++ b/edge/edge_test.go @@ -0,0 +1,57 @@ +package edge_test + +import ( + "testing" + + "fbc/ent" + "fbc/ent/edge" + + "github.com/stretchr/testify/assert" +) + +func TestEdge(t *testing.T) { + assert := assert.New(t) + type User struct{ ent.Schema } + e := edge.To("friends", User.Type).Required() + assert.True(e.IsAssoc()) + assert.Equal("User", e.Type()) + assert.Equal("friends", e.Name()) + assert.True(e.IsRequired()) + + type Node struct{ ent.Schema } + e = edge.To("parent", Node.Type).Unique() + assert.True(e.IsAssoc()) + assert.True(e.IsUnique()) + assert.Equal("Node", e.Type()) + assert.Equal("parent", e.Name()) + assert.False(e.IsRequired()) + + t.Log("m2m relation of the same type") + From := edge.To("following", User.Type).From("followers") + assert.False(From.IsAssoc()) + 
assert.True(From.IsInverse()) + assert.False(From.IsUnique()) + assert.Equal("followers", From.Name()) + assert.NotNil(From.Assoc()) + assert.Equal("following", From.Assoc().Name()) + assert.False(From.Assoc().IsUnique()) + + t.Log("o2m relation of the same type") + From = edge.To("following", User.Type).Unique().From("followers") + assert.False(From.IsUnique()) + assert.True(From.Assoc().IsUnique()) + From = edge.To("following", User.Type).From("followers").Unique() + assert.True(From.IsUnique()) + assert.False(From.Assoc().IsUnique()) + + t.Log("o2o relation of the same type") + From = edge.To("following", User.Type).Unique().From("followers").Unique() + assert.True(From.IsUnique()) + assert.True(From.Assoc().IsUnique()) + + e = edge.To("user", User.Type).StructTag(`json:"user_name,omitempty"`) + assert.Equal(`json:"user_name,omitempty"`, e.Tag()) + From = edge.To("following", User.Type).StructTag("following").From("followers").StructTag("followers") + assert.Equal("followers", From.Tag()) + assert.Equal("following", From.Assoc().Tag()) +} diff --git a/ent.go b/ent.go new file mode 100644 index 000000000..1d4781497 --- /dev/null +++ b/ent.go @@ -0,0 +1,68 @@ +// Package ent is an interface package for the schemas that use entc. +package ent + +import ( + "context" + + "fbc/ent/dialect/sql" + "fbc/ent/edge" + "fbc/ent/field" + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" +) + +type ( + // Schema is the interface for describing an entity schema for entc. + Schema interface { + Type() + Edges() []Edge + Fields() []Field + } + + // Field is the interface for vertex and edges fields used by the code generation. + Field interface { + Tag() string + Name() string + Type() field.Type + IsUnique() bool + IsNullable() bool + IsOptional() bool + HasDefault() bool + Value() interface{} + Validators() []interface{} + } + + // Edge is the interface for graph edges in the schema. It is used by the code generation. 
+ Edge interface { + Tag() string + Type() string + Name() string + RefName() string + IsAssoc() bool + IsUnique() bool + IsInverse() bool + IsRequired() bool + Assoc() *edge.Edge + } + + // Execer is the driver for executing gremlin requests. + Execer interface { + Exec(context.Context, string, interface{}) (*gremlin.Response, error) + } + + // Predicate applies condition changes on either graph traversal or sql selector. + Predicate struct { + SQL func(*sql.Selector) // common sql. + Gremlin func(*dsl.Traversal) // common gremlin. + } +) + +// DefaultSchema holds the default schema implementation. +var DefaultSchema = defaultSchema{} + +// defaultSchema is the default implementation for the schema. +type defaultSchema struct{ Schema } + +func (defaultSchema) Edges() []Edge { return nil } + +func (defaultSchema) Fields() []Field { return nil } diff --git a/entc/cmd/entc/entc.go b/entc/cmd/entc/entc.go new file mode 100644 index 000000000..4bb5e8143 --- /dev/null +++ b/entc/cmd/entc/entc.go @@ -0,0 +1,143 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "text/template" + "unicode" + + "fbc/ent/entc/plugin" + + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{Use: "entc"} + cmd.AddCommand( + &cobra.Command{ + Use: "init [schemas]", + Short: "initialize an environment with zero or more schemas", + Example: examples( + "entc init Example", + "entc init User Group", + ), + DisableFlagsInUseLine: true, + Args: func(_ *cobra.Command, names []string) error { + for _, name := range names { + if !unicode.IsUpper(rune(name[0])) { + return fmt.Errorf("schema names must begin with uppercase") + } + } + return nil + }, + Run: func(cmd *cobra.Command, names []string) { + path := "ent/schema" + _, err := os.Stat(path) + if os.IsNotExist(err) { + err = os.MkdirAll(path, os.ModePerm) + } + failOnErr(err) + for _, name := range names { + b := bytes.NewBuffer(nil) + failOnErr(tmpl.Execute(b, name)) + target := 
filepath.Join(path, strings.ToLower(name+".go")) + failOnErr(ioutil.WriteFile(target, b.Bytes(), 0644)) + } + }, + }, + &cobra.Command{ + Use: "describe [flags] path", + Short: "print a description of the graph schema", + Example: examples( + "entc describe ./ent/schema", + "entc describe github.com/a8m/x", + ), + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, path []string) { + graph, err := plugin.LoadGraph(path[0]) + failOnErr(err) + graph.Describe(os.Stdout) + }, + }, + func() *cobra.Command { + var ( + plugins []string + header, target string + cmd = &cobra.Command{ + Use: "generate [flags] path", + Short: "generate go code for the schema directory", + Example: examples( + "entc generate ./ent/schema", + "entc generate github.com/a8m/x", + ), + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, path []string) { + graph, err := plugin.LoadGraph(path[0]) + failOnErr(err) + + if target == "" { + abs, err := filepath.Abs(path[0]) + failOnErr(err) + target = filepath.Dir(abs) + } + graph.Target = target + graph.Header = header + failOnErr(graph.Gen()) + + // execute additional plugins. + for _, plg := range plugins { + failOnErr(plugin.Exec(plg, graph)) + } + }, + } + ) + cmd.Flags().StringVar(&header, "header", "", "override codegen header") + cmd.Flags().StringVar(&target, "target", "", "target directory for codegen") + cmd.Flags().StringSliceVarP(&plugins, "plugin", "", nil, "specifies additional plugin to execute") + return cmd + }(), + ) + cmd.Execute() +} + +// schema template for the "init" command. +var tmpl = template.Must(template.New("schema"). + Parse(`package schema + +import "fbc/ent" + +// {{ . }} holds the schema definition for the {{ . }} entity. +type {{ . }} struct { + ent.Schema +} + +// Fields of the {{ . }}. +func ({{ . }}) Fields() []ent.Field { + return nil +} + +// Edges of the {{ . }}. +func ({{ . 
}}) Edges() []ent.Edge { + return nil +} +`)) + +func failOnErr(err error) { + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + fmt.Fprint(os.Stderr, "\n") + os.Exit(1) + } +} + +// examples formats the given examples to the cli. +func examples(ex ...string) string { + for i := range ex { + ex[i] = " " + ex[i] // indent each row with 2 spaces. + } + return strings.Join(ex, "\n") +} diff --git a/entc/gen/bindata.go b/entc/gen/bindata.go new file mode 100644 index 000000000..21e21f6fc --- /dev/null +++ b/entc/gen/bindata.go @@ -0,0 +1,854 @@ +// Package gen Code generated by go-bindata. (@generated) DO NOT EDIT. +// sources: +// template/base.tmpl +// template/builder/create.tmpl +// template/builder/delete.tmpl +// template/builder/query.tmpl +// template/builder/setter.tmpl +// template/builder/update.tmpl +// template/client.tmpl +// template/config.tmpl +// template/dialect/gremlin/create.tmpl +// template/dialect/gremlin/delete.tmpl +// template/dialect/gremlin/group.tmpl +// template/dialect/gremlin/query.tmpl +// template/dialect/gremlin/update.tmpl +// template/dialect/sql/create.tmpl +// template/dialect/sql/delete.tmpl +// template/dialect/sql/group.tmpl +// template/dialect/sql/query.tmpl +// template/dialect/sql/update.tmpl +// template/ent.tmpl +// template/example.tmpl +// template/header.tmpl +// template/import.tmpl +// template/meta.tmpl +// template/migrate/migrate.tmpl +// template/migrate/schema.tmpl +// template/tx.tmpl +// template/where.tmpl +package gen + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + if clErr != nil { + return nil, 
err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name return file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size return file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode return file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// Mode return file modify time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir return file whether a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys return file is sys mode +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _templateBaseTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x59\x5b\x6f\xe3\x36\xf6\x7f\xb6\x3f\xc5\xa9\xff\x99\x42\xca\x5f\xa5\xa7\xf3\xb0\x0f\x29\xb2\x40\x3a\x93\xe9\x0e\x90\xcd\x4c\x26\xc9\x16\x8b\x20\x48\x19\xe9\xc8\x26\x22\x91\x1a\x92\x72\x12\xb8\xfe\xee\x8b\x43\x52\x12\x7d\xdb\x64\xbb\xdd\x3c\x04\xf6\x21\x79\xae\xbf\x73\x21\xbd\x5c\x42\x81\xa5\x90\x08\x93\x7b\x6e\x70\x02\xab\xd5\x78\xbc\x5c\xc2\x41\xf3\x30\x83\xa3\x63\x20\x22\x1c\xb0\xf7\x4a\x96\x62\xc6\xbe\xf0\xfc\x81\xcf\x90\x36\x2d\x97\x60\xb1\x6e\x2a\x6e\x11\x26\x73\xe4\x05\xea\x89\x3f\x15\x38\x0c\xab\xa2\x6e\x94\xb6\x13\x38\x70\x4b\xd3\x29\x7c\xd1\x58\x88\x9c\xd6\x84\x01\x2e\x81\x57\x82\x1b\xb0\x0a\x50\x5a\xd6\x2f\xb2\xb1\x7d\x6e\x30\xda\x7c\xbc\xbe\xee\x58\x7d\xd6\x30\xd3\xaa\x6d\x0c\x54\xc2\x58\x50\x25\x34\xdd\xba\x81\x47\x61\xe7\x60\xe7\x08\x4a\x83\x6a\x50\x73\xab\x34\xdc\xa3\x7d\x44\x94\x44\xaf\xd9\xb8\x6c\x65\x0e\x9f\x75\x12\x9d\x62\x8c\xad\xc9\x49\xd7\xc5\xc2\x72\x3c\xd2\x68\x5b\x2d\xd7\xe9\xcb\xf1\x68\x74\x79\x71\x76\x04\xc4\x32\x31\x70\x68\xbe\x55\xec\x12\x2b\xcc\xad\xd2\x29\x9d\x1a\x8d\x4a\xa5\x41\x64\xd0\x90\x67\x35\x97\x33\x8c\xb5\x75\x3b\x46\xa2\x04\x01\x7f\x85\xb7\xe1\xeb\xc8\xb0\xcf\
x3a\x49\xdd\xe7\x95\xfb\xdf\xb0\xcb\x8b\xb3\xc4\x38\x12\x51\x56\xd9\x78\x34\xfa\x45\x63\x5d\x09\x19\x84\x5b\x0d\x87\x85\xa9\xd8\x95\xe6\x0b\xd4\x86\x57\x41\xbc\xd5\x86\x24\xd7\xfc\x01\x93\x9b\x5b\x21\x2d\xea\x92\xe7\xb8\x5c\x65\xf0\x36\x83\x0a\x65\xe4\x86\x34\xed\x14\xbe\xfb\xf7\x0a\x5b\x5a\xbb\xbb\x63\xe7\xf8\x18\xf4\x6c\x58\x50\x27\xb1\x9e\x40\x72\x8f\x81\x37\x0d\xca\x22\xb1\xda\x64\x60\x7b\xf5\x47\x56\xb3\x5f\xe7\xa8\x31\xb9\xbb\x23\x53\xad\x36\x8c\x31\x27\x9d\x2c\x5b\x8d\x3d\x64\xce\x95\x25\x06\x95\x40\xe3\x22\x2a\x95\x1d\x42\xaa\x5c\x34\x61\x26\x16\x28\x07\x05\x43\x70\xcf\x95\x4d\x1a\xf8\x5f\x45\x34\x44\x83\x91\x94\x5e\xe9\xd7\x86\x63\xd3\x73\x9b\x8e\x8b\x5d\x43\x02\xec\x96\x5b\x3e\xeb\x02\x75\xef\x18\x2e\x41\x11\x41\xc8\x59\xe7\x13\xdb\xc9\x0c\xd9\xe4\x0f\x6c\xe4\xd1\x72\x19\x82\x7b\x50\x66\x70\xe0\x58\x90\x6a\xfe\xc3\x6a\x35\x1e\x51\x41\x28\xe1\x18\x1a\x6e\x72\x5e\xd1\x67\xa2\x4e\xa7\xe0\x17\x56\xab\xb5\xd8\xf8\x38\x94\x02\xab\xc2\x80\x90\xb4\xa9\x6d\x1a\xd4\x61\xab\x63\xcb\xc6\x23\x17\x9d\x8e\x41\x12\xb6\x33\xc6\x8c\x25\x03\xd2\xa0\x2a\x79\x2a\x04\xc7\x11\x9c\xe7\x5e\x8a\x4a\x87\xdb\x72\xc0\x6d\xe0\x1f\x65\x55\x81\xfa\xe7\xe7\x84\x4e\x0f\x4a\xa4\x51\xa6\xb9\x48\xbe\x32\x94\x2f\x0a\xb4\x9a\xfd\xfc\x9c\x94\x19\xd0\xe9\xe5\xb2\xf7\x64\xe7\xe3\x4d\xb9\x2b\x8a\xf1\x72\x09\x28\x8b\xae\x6a\x9e\xcc\x66\x1a\x67\x84\xd7\x28\xde\x3c\x10\x85\x92\x60\x2c\x36\x7d\x2e\x50\x51\xfc\xe1\xfe\x79\x00\xc0\xd4\x04\x1f\x05\x24\x0c\xec\x8c\xd5\x6d\x6e\x49\xd3\xe9\x14\x2e\x2f\xce\x1c\x83\x5c\x55\x6d\x2d\xe1\x51\x53\xda\x16\x43\x29\x8d\x05\x92\x4b\xe8\x03\x1b\x53\x44\x80\xfe\x9c\x97\x36\x42\xe2\x03\xea\xb8\x07\x6f\xc2\x0c\xad\x01\xfb\xa8\xa0\xe2\xf7\x58\x19\xe0\x06\x1a\xae\x79\x8d\x16\xb5\x61\x70\x35\x27\xff\x69\x63\xa1\x35\x58\x10\x86\x48\xf4\x6f\x27\xe6\x37\x6f\x24\x79\x9b\x28\x7d\xae\x67\x8e\x3b\x97\x85\x23\x1b\xcc\x95\x2c\x42\x73\x51\x0d\xa9\xc8\x2b\x90\xbc\xc6\xfe\xa4\xc4\x27\x1b\xd7\xb2\x44\x69\xb7\x46\x1d\x4b\x43\x6b\xf8\x0c\x53\x36\xee\xa2\x1f\xb0\xe6\xec\xc8\xa0\x03\x68\x4f\
xd8\x80\x04\xe5\xa6\x0b\x98\x71\x2a\x40\x63\xb0\x2d\xd4\x4e\xcf\x39\x99\x1a\x25\xaf\x29\x67\xb9\x54\x76\x8e\x1a\xfc\xff\x6e\x4f\xe8\x63\x79\x6b\xac\xaa\x9d\x15\x86\xc1\x47\xa5\x01\x9f\x78\xdd\x54\x78\x34\x9e\x4e\xc7\xd3\xe9\xe8\x17\x0a\x39\x81\x8c\x80\xf7\x63\xe6\x01\xf8\x2e\x65\xb4\xd6\x07\x3b\xe9\x7a\xfb\x6a\xc5\x4e\x4c\xfc\xed\xb2\xad\xc3\xd1\x34\x83\x89\x69\xeb\x3b\xff\x6d\x92\x66\xf0\x8a\x53\xef\xd6\x4e\xbd\x9b\xa4\x5e\xf0\x65\xce\x65\x92\xdb\xa7\x0c\xbe\x5f\xa4\xa4\xa8\xcb\xfa\x13\x93\x94\x72\x40\x60\xe6\x60\xde\xb9\x75\x00\xe6\x50\x97\x7b\xda\x0b\x35\xd9\xf3\xf0\x49\x17\x8e\xd2\x06\x27\x2f\xf4\x4d\x27\x6c\x4f\xa5\x36\x96\x6b\x9b\xc1\xdd\x8b\x21\x5e\x93\x50\xca\xbe\x6e\x07\x06\x91\x04\x57\xa9\x87\x02\x4b\xe1\xcb\xe0\x80\x22\xfa\x91\x1c\x45\xf5\x82\xf7\x06\xf7\xb5\x56\x12\xbd\xab\x11\x0e\xb8\xa1\xe0\x7e\xc0\x92\xb7\x95\x0d\x9b\x56\xab\x33\xca\x1f\x82\x19\x81\xba\xf0\x8b\x3e\xa9\xd6\xf1\xde\x1f\xd8\x97\xc0\xd3\x29\x7c\xb2\x60\xe6\xaa\xad\x0a\xb8\x47\x9f\x78\xdc\xf3\xf5\x05\xe2\x07\xcb\x67\x8e\x5f\x81\xb9\x2a\x9c\x57\x94\x06\x0e\x35\x6f\xe0\x01\x9f\xdd\x92\x9b\x29\xb8\x87\x76\x5f\x32\xbc\x9b\xb0\x00\x8d\xa6\x51\xd2\x60\x10\x17\xba\x15\x0d\x7e\xcb\x25\x7c\x6b\x95\xc5\xde\x58\x78\x47\xcc\x6b\xa5\xfb\x42\x4a\xc9\xcd\x17\x4a\x14\x90\x2b\x59\x56\x22\xb7\x4e\x85\xd6\xa0\x2f\x0e\x7b\x21\xda\x9b\xee\x81\x4a\x38\xf5\xb9\x74\x47\xb2\x26\xe9\x6f\x4e\x9b\x3e\x23\x37\x93\x8d\xaa\x4f\xaf\xa7\x5a\xa0\xd6\xa2\x40\x10\x96\x8d\x47\xb9\x92\xc6\xee\x09\xc9\xf1\xb6\x4d\xe3\xa1\x65\xca\xcd\x9e\xb9\xed\x80\x9d\xe5\x42\xb9\x6e\x2a\xca\x18\x41\xab\xd5\x46\xd3\xa5\x29\xb8\x6f\x1d\xc8\xf3\xb9\xef\x06\x6b\x2d\xd7\xf9\x63\x07\x2b\xcf\xc0\xa3\xbe\xe7\xb1\x91\x93\x5b\x49\x09\x2f\x35\xe5\x38\x2d\xe3\xbc\xf4\x0a\xe0\x37\xa7\xd0\xe4\xef\xc8\x25\x5d\x40\x4e\x16\x4e\x74\x65\xc8\x0f\xbd\xb6\xbd\x36\xbb\xd4\x36\xec\x7d\x88\xee\x70\x72\x72\x38\x19\x2c\xd8\xd7\xd0\x87\x84\x7d\x65\xce\xd3\x80\x4e\xbb\x8f\x8f\x61\x32\xe9\xba\xbb\x23\xec\x06\x42\x34\xb9\xf7\x33\x66\x91\xd1\xd8\x77\x62\xbc\xf4\x94\xed\x30\
xe8\x5a\x96\xaa\x2a\x92\x94\xfd\x83\x57\x2d\x9a\x60\x5c\x84\xe5\x74\x87\x8b\x12\x52\xf8\x4c\xe5\xbc\x4a\x7b\xcb\x49\x4c\x28\x47\x7b\x27\x8b\x53\xad\xcf\x95\xfd\xa8\x5a\x59\x84\x64\x35\xf0\x38\xa7\x3b\x92\x7e\xa6\xb8\x59\x05\x25\xda\x7c\x0e\x1c\x4c\x83\xb9\x28\x45\x4e\x23\xa4\xb0\xcf\x2e\x2b\x85\x85\x47\x6e\xdc\x48\x5e\x3a\x1e\xa1\x5f\x17\xdc\x72\xba\x3f\x86\x81\x23\x96\x32\x8c\x1c\xbe\x56\x85\x11\xa1\x57\x87\x4a\x09\xf5\xb6\x1a\xa5\xf5\x09\x82\x9e\xd8\xdd\x59\xc2\x70\x9f\x20\x1c\x46\x7c\x53\x7f\x36\x89\x11\xd7\x15\xe9\xda\xb2\xcb\x46\x0b\x69\xcb\x64\x32\x54\x87\x23\x78\x13\x69\x3e\xc9\x00\x99\xd3\x28\x0d\xba\x7c\x32\x5b\x9e\xe1\x70\xaf\x54\x85\x5c\x82\x90\x6e\x7c\x20\x41\x8f\x73\x74\x4d\x3b\x52\x95\x76\x0e\x3e\x71\xc4\xa0\xf5\xc0\x34\x41\xad\xfd\x52\xea\xb8\x92\xc2\x77\x19\xa8\x07\xaa\xff\xa8\x35\x4b\xd6\xcc\xeb\xad\x51\x0f\xe3\x38\x74\x97\x42\xce\xda\x8a\xeb\x17\xa3\xd7\xed\x8b\xa2\xe7\x6a\xac\xa5\xfd\x4a\xa2\x0b\xe4\xcb\x41\xec\xe5\xfd\xf9\x71\xec\x58\xff\x17\xa1\xec\xac\xdc\x13\xcd\x2d\x67\xfd\xa7\x01\x1d\xbc\xb8\x19\xd3\x8e\xf5\xab\xc3\xda\x5b\xbb\x11\xd9\xe0\xbe\xf7\xd4\x60\x34\x17\xd2\x7e\xe4\xa2\xc2\xbd\xd9\x99\x6b\xe4\x16\xa7\x6d\x53\x50\x35\xa6\x38\x76\xcd\xd3\x05\xda\xdf\x15\x0a\x62\x1a\xaf\xa9\x92\xac\x13\x1a\xf2\x5e\x8c\x81\xd2\x09\x5a\x1b\x30\x33\x58\x08\x55\xf9\x4e\xa4\x4a\xc0\x62\xe6\x78\xf8\x56\xd1\x4a\xf1\xad\x45\x89\xc6\x0c\x08\xd9\x52\x7b\x80\x49\x6d\x66\xfd\x7d\x80\xee\x16\xde\x4b\x7f\x08\x30\x3b\x04\xfd\x11\xd0\x78\x03\x22\x1f\x04\x17\x10\x9c\x1c\x82\x6a\x33\xeb\xf0\x73\x2d\x9d\xce\xbb\x34\x34\xec\x57\x77\x55\xda\x8b\xed\x6d\x5d\x3d\xb7\x24\x0d\x26\x46\xaf\x10\x8c\x16\x82\xcc\xf7\x8a\xc6\x8d\x75\x89\x54\xe7\xcf\x1d\xfd\x95\x6e\x21\x26\x49\xd4\xda\xd6\x5e\x7f\xd2\x48\xb4\xb1\x3a\x57\x72\xc1\x2e\x68\x18\x49\x90\x35\x1a\x4b\xf1\x94\xa4\xf0\xff\xc1\x13\x19\x48\x51\x91\x6a\x2f\xd9\x56\x73\x6d\xe6\xbc\xfa\x45\xf3\x66\x6e\x94\x4c\xee\xe1\xe6\xf6\xfe\xd9\x3d\xbe\x74\xe6\x2e\xb8\x86\x05\xdc\xfc\x78\x7b\xd8\x41\x82\x9a\xab\x76\x8f\x0f\xb3\x70\x8e\xf5\x9c\
x92\x7b\x77\x89\xf8\xc9\xed\xf8\xee\x98\xf4\x88\xa7\x11\xd4\x9a\x3a\x1b\xb1\x58\xdc\xbc\xbd\xa5\x06\xbd\xb1\x83\x20\xe0\xf0\xb1\x81\x80\x5a\x18\x4a\xe9\x0e\x33\x0b\xea\xb8\x93\xb4\x63\xf6\x9d\x27\x1b\xf6\x37\x6e\xbe\x78\x6f\x1c\x92\x00\x82\x46\xe7\x9d\xf4\x35\x62\x84\x5c\xf0\x4a\x74\x73\x86\x1b\x97\x9d\x27\x02\xd0\x1c\x53\x2f\xd5\x79\x1a\x8e\xa1\x93\x7c\xa5\x45\xbd\x57\x74\x1f\xba\x10\x97\xe9\x14\xfc\x5a\x5f\x2c\xc2\x4d\x99\x48\x6e\xaa\x27\xc9\xb3\x70\xb1\x75\xb0\xe7\xd2\x9a\x0e\x40\x3b\xa3\xd9\xa3\xa0\x4b\xab\xc0\x1b\x26\xa7\xde\x82\x09\x84\x67\x3a\x7c\x3c\xd5\xfa\xda\xa5\x94\x1f\x65\x7c\x69\xa2\xd2\x19\x65\x98\x47\x00\xe9\x11\xb2\xcf\xcf\xf8\xdd\x8b\xdd\x26\x93\xc4\x15\xf1\x70\xa3\x85\x0e\xc3\x0b\x58\xc3\xf0\x2e\x1c\x46\xc0\xfe\x7e\xc7\xf2\xb2\x36\xb3\xa3\xf5\xca\xe0\x45\xbc\x31\xec\x4d\xb8\x76\x3b\x34\x1c\xc1\x9b\xff\x5b\x4c\x32\x88\xf5\xc8\x60\x91\xf6\xaf\x93\x91\xc6\xa7\x54\x1e\x5f\x67\x35\x55\xd2\x5d\x46\x13\x8b\xce\x66\xda\x93\x81\x18\xc6\xd3\x3f\xc7\x4e\x57\xc4\x23\x33\x45\xb1\x61\x63\x27\xb7\xb7\xf1\x93\x59\x67\xdb\x6a\xfc\x43\x1d\x74\xa3\xd0\xb6\x1a\xfb\x0e\xba\x25\xe0\x75\x7d\x74\x0b\xaf\xdb\x63\x92\xf0\xbc\xb9\xb4\xa1\xc7\xc8\xee\xe1\x47\x94\xd1\x9b\x65\x77\x41\x85\xb9\x72\x57\xce\xad\x34\xa1\x0f\x96\x0b\xe9\x5f\x6b\xd6\x06\x80\x0d\x11\x89\x86\xc3\x70\x9a\x7d\x0d\x6c\x53\xd8\xa9\x6f\xe6\x6c\x73\x35\x04\xc9\xb0\x9d\x21\x5c\xbd\x50\x1c\x35\x49\x69\x2b\xcb\x3e\x70\xcb\x33\xc0\xbd\x75\x52\x0a\x42\x30\xaf\x0c\xba\x5a\xd3\x55\xcf\x0c\xac\x6e\xb1\x2f\xec\xc2\x5c\x5e\x9c\x0d\x4a\x78\x93\xa2\x68\xbc\x64\x48\xdf\xcd\x65\x5b\xdf\xa3\x86\x1f\xdf\xfe\xe5\x1d\xc5\xff\xf4\xeb\xdd\x87\xeb\x2f\x77\xa7\xe7\x57\x5f\xff\x49\x23\x66\xfd\x6c\xbe\x55\x99\x1b\x44\x27\xd7\xe7\x9f\x2e\xae\x4f\xb7\x3b\xf1\x84\x0e\x5e\x5e\x9c\x09\xdb\xd5\x31\xe6\x9c\x41\x45\x32\xe0\x20\xb4\xfd\x9f\x60\xbb\x58\xd7\x66\x96\x85\x3a\xe5\xb4\x98\xa4\xf0\xfb\xef\x7b\xf7\xed\xd5\x61\xad\xc4\xef\x4b\xb2\x8c\x94\x59\x05\x57\x46\xde\x8d\x7c\xee\xf1\xa8\x55\x55\xdd\xf3\xfc\x01\x72\x5e\x55\xee\xa7\x2f\xfb\xc4\
xbe\x76\x44\xf2\x06\x4d\x00\xf1\x73\xba\xcf\xa0\xe1\x4d\xa5\xdb\x1b\x32\xab\x04\x95\xe7\xad\xd6\x58\x04\x38\x76\x1b\x12\xfb\x04\x85\xe0\x74\x13\x67\x57\x4f\x4e\xc1\x2e\x86\x7d\x1f\x16\x25\xe8\x80\xac\x48\x0d\x72\xa7\xde\xc0\x10\x7d\x3d\x5e\xeb\x70\x6f\xcc\x11\xbc\xa1\xba\x11\x85\x21\x73\xe7\xfa\x0e\x8a\x5a\x77\x39\xbb\x0f\x56\xe9\x4f\xb4\x61\x47\x37\x8f\xbe\x86\x1f\x26\x85\x7b\x33\x4b\x84\x2c\xf0\x09\x0e\xdc\x18\x64\xe0\x6d\xca\x3e\x7d\x60\x57\x34\x7f\xae\x56\xe4\xe0\x07\x7c\x36\x6b\x0d\x90\x08\x53\x51\x18\x28\xb5\xaa\x7d\x4d\xa2\x12\x58\xf3\x26\x38\x8c\x36\x24\x35\x11\x6e\x82\x98\xd5\xea\xd6\x0f\xae\xd4\x5e\x6e\x6e\x7b\x2a\xe9\x19\xff\x90\xd6\x2f\xf4\x3f\xa3\xd5\xd4\x96\xdd\x7b\x18\x5d\xf5\x87\xdf\x04\x6a\x67\x62\xf4\x63\x98\x71\x25\x36\xb6\xd4\x8c\xe3\xdb\xf9\xbf\x02\x00\x00\xff\xff\xad\xf6\xe1\xcb\x9f\x1d\x00\x00") + +func templateBaseTmplBytes() ([]byte, error) { + return bindataRead( + _templateBaseTmpl, + "template/base.tmpl", + ) +} + +func templateBaseTmpl() (*asset, error) { + bytes, err := templateBaseTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/base.tmpl", size: 7583, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateBuilderCreateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\x4d\x6f\xdc\x36\x10\x3d\x4b\xbf\x62\x22\xa8\xc5\xca\xb0\xb5\xe8\x75\x0b\xf7\x50\x3b\x45\x0b\xa4\x6e\x03\xb7\x45\x80\xa6\x28\x68\x69\xa4\x25\xcc\xa5\x64\x92\x92\x6d\x08\xfa\xef\xc5\x90\xd4\x67\x9c\xed\xc6\x39\x99\xa6\x38\xef\x0d\xdf\x1b\xce\x6c\xd7\x41\x8e\x05\x97\x08\x51\xa6\x90\x19\x8c\xa0\xef\xc3\xae\x83\xb8\xbe\x2f\x61\x77\x09\x77\x4c\x23\xc4\xe9\x55\x25\x0b\x5e\xa6\xbf\xb3\xec\x9e\x95\xe8\xcf\x18\x3c\xd4\x82\x19\x84\x68\x8f\x2c\x47\x15\xb9\xa8\xbe\x0f\x97\x5f\xf9\xa1\xae\x94\x89\x20\x1e\x3e\xc5\x77\x0d\x17\x39\x2a\x22\xa8\x15\x97\x06\x36\x35\xd3\x19\x13\x10\xa7\x37\xec\x80\x09\x44\x57\xcb\x6c\x14\x66\xc8\x5b\x17\x31\xae\x47\x18\x82\xdd\x6e\x61\x8e\xdc\xf7\xc0\x35\x98\x3d\xc2\xb0\x53\x54\x0a\xec\x1d\xb9\x2c\x81\xd9\xc3\x96\x8c\x8e\xa2\x34\xdc\x3c\xa7\xa1\x79\xae\x71\x0d\xa3\x8d\x6a\x32\x03\x5d\x18\x64\x56\x84\x30\xe8\x3a\x50\x4c\x96\x08\xf1\xbf\xe7\x10\x17\x94\x53\x9c\xfe\xc4\x51\xe4\x9a\x52\x09\x82\xae\xbb\x80\xb8\x48\x6f\x6d\xa4\xfd\x40\x40\x67\x04\x5c\xa4\x7f\x10\x07\x1d\xeb\x3a\x40\x99\xfb\xe5\xc5\x1c\x12\x1d\xe4\xdb\xbc\xc4\x39\x22\xae\x11\x0f\xac\xfe\xdb\xde\xe3\x97\xeb\x01\xf6\x1f\x97\x6e\x37\xe1\x5f\xf4\x7d\xe8\x64\x7f\xe4\x66\x0f\xf8\x64\x68\x37\x86\xe8\x47\x77\xc7\x68\xa1\x63\xb0\x70\x4e\xa3\x31\x74\x22\xf5\x3e\xf8\x7c\x49\xec\x5b\xd6\xa2\xd3\x13\x9d\xce\x0b\x41\xb9\xb4\x7b\x39\x33\x8c\x2a\x28\x0d\x8b\x46\x66\xb0\x59\x58\x39\x48\x32\xb1\x27\x16\x75\x93\x99\x27\xc8\x2a\x69\xf0\xc9\x50\xe1\xd1\xdf\x04\x36\x67\x73\x82\x73\x40\xa5\x2a\x95\x90\x2d\xc7\xec\xb8\x18\xd5\xe3\x05\x54\x8a\xf4\xff\x99\xe9\x6b\x2c\x58\x23\x0c\x6c\x64\x65\x68\xeb\xb7\xda\xf0\x4a\x32\x91\xf8\xf3\x01\x2f\x60\x95\x6a\xea\xcc\x5b\x19\x70\x79\x09\x92\x0b\x4a\x22\x20\x16\x22\x59\x32\x78\xbc\x20\x68\x29\x2d\x7b\x83\xe9\x0d\x79\x4c\x7f\xf6\xaa\x92\xda\x30\x69\x60\x08\x39\x2d\x03\xf8\xb6\x1d\xd8\x51\x68\x9c\x18\x15\x9a\x46\x49\xca\xcf\x8b\xa5\xd3\x1b\x7c\xdc\x44\xc3\xe3\xee\xfb\x1d\x1c\xb8\xd6\xf4\x1c\x14\x3e\x34\x5c\x61\x0e\x85\xc5\xfd
\x18\x39\x2e\x2f\xf6\xc7\x28\x4a\x46\x0e\x5f\x51\x41\x10\x38\x65\x67\x3b\x43\x89\xc5\x45\xfa\x17\x13\x3c\x67\xa6\x52\x83\x05\x41\xd7\x6d\xcf\x80\xe5\x39\xc8\x46\x08\x76\x27\x10\xb2\x3d\x66\xf7\x50\x49\xf1\x6c\xdf\x66\xe5\x4d\x70\x39\x68\x0b\x55\x35\x86\xfa\x93\x95\xb2\x65\xa2\x41\x38\xdb\x4e\x80\x10\x8f\x58\xbb\x4b\x60\x54\xd5\x93\x97\xa3\xb9\x93\x1b\xc9\x14\x6a\xeb\x61\x0a\xa7\x8a\x3d\xd1\xf2\x37\xde\x72\x58\x8a\x41\x25\x83\x4a\x7d\xde\xe5\x51\x11\xe8\x7b\x57\xcb\xff\x47\x95\x7c\x6f\x11\xdf\xcc\x6b\x6c\xe1\x6a\x71\x30\xe9\x5b\x72\xb6\x58\xba\xda\x8e\x54\x05\xe3\x82\x5c\xa5\xe5\xcb\xce\xee\xe0\x9b\x36\xb2\x05\xe2\x2c\xfe\xac\x3e\x3d\xcc\xfa\xd5\xc2\xf7\xe5\xfa\x84\x46\x46\xd0\x98\xfe\x29\xf9\x43\x83\xb3\x17\x27\x50\xae\x1b\x84\xd5\x65\xdd\xf6\x12\xf8\x01\xbe\xf3\x7a\x9c\x54\xe4\x8d\x30\xbc\x16\x08\x4c\x6b\x5e\xca\x03\x4a\xa3\xa1\x92\xc0\xa0\x71\x29\x60\x5e\xa2\x57\x06\xd7\x35\xff\x42\x91\xdb\x0b\xd8\xe2\xc2\xa9\xda\x8e\x37\x8e\x4f\x3a\xf7\xa2\x71\xbc\xea\xa5\x7e\x49\xd2\xf3\xb5\x7e\xe4\x26\xdb\x7f\x92\x65\xae\x68\x95\x5e\x73\x26\x30\x33\x1b\xdb\x58\x33\x9a\xfd\xb9\xdb\x49\x7f\x7d\xbe\x7d\xff\xee\x7c\xfc\xf7\xf6\xfd\x3b\x6e\x70\x17\x8e\xd9\xaf\x01\xf5\x83\x18\x9a\x79\xb2\x82\xba\xc1\xda\x34\xf2\x58\x70\xa9\xf0\x20\xb8\x9c\x01\xf8\x2e\x30\x8b\x39\x2a\x57\x23\x75\x53\xd3\xaf\x0e\xcc\x07\x5a\x92\xc6\x4e\x42\x3f\xbc\x3e\x40\xc6\x84\xd0\x6e\x90\x51\xf3\xa8\x99\xe4\x99\x26\x73\xed\x96\xa3\xd1\xc0\xa4\x63\xf9\xa2\x19\xf6\xe1\xe5\x21\xb6\x98\x61\x24\x71\x7b\x3e\xef\x1a\x73\x09\x66\x77\xf7\xad\x65\xd6\x08\x6c\xaa\x1b\xf7\x68\xfb\x70\x50\xa4\x7d\xf5\x9c\xf7\x12\x6d\xf5\x83\xd8\x0e\xbf\x03\xd7\x33\xff\xab\x80\xbd\xa1\xc7\xc0\xfd\xf2\xbf\x00\x00\x00\xff\xff\x44\xad\x3e\xa5\x92\x0a\x00\x00") + +func templateBuilderCreateTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuilderCreateTmpl, + "template/builder/create.tmpl", + ) +} + +func templateBuilderCreateTmpl() (*asset, error) { + bytes, err := templateBuilderCreateTmplBytes() + if err != nil { + return nil, err + 
} + + info := bindataFileInfo{name: "template/builder/create.tmpl", size: 2706, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateBuilderDeleteTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x94\xc1\x6e\x9c\x30\x10\x86\xcf\xf8\x29\xa6\x88\x03\x54\x91\xf7\x9e\x2a\x97\x36\xbd\xa5\x69\xd2\x1c\x1a\xa9\xea\xc1\xb1\x87\x5d\x2b\xac\x71\xec\xa1\xbb\x2b\xe4\x77\xaf\x0c\x0b\x81\x6d\x36\x59\xb5\x3d\x01\xc6\xf3\xf9\xff\xc7\x33\xd3\xb6\xa0\xb0\xd4\x06\x21\x55\x58\x21\x61\x0a\x21\xb0\xb6\x85\xcc\x3e\x2e\xe1\xfc\x02\x1e\x84\x47\xc8\xf8\xa7\xda\x94\x7a\xc9\x6f\x84\x7c\x14\x4b\xdc\xef\x21\x5c\xdb\x4a\x10\x42\xba\x42\xa1\xd0\xa5\x7d\x54\x08\x6c\xfe\x57\xaf\x6d\xed\x28\x85\x6c\xf8\x95\x3d\x34\xba\x52\xe8\xe2\x01\xd6\x69\x43\x90\x5b\xe1\xa5\xa8\x20\xe3\xd7\x62\x8d\x05\xa4\x97\x73\x35\x0e\x25\xea\x5f\x7d\xc4\xf8\x3e\x62\x42\x60\x8b\x05\x4c\xc1\x21\x80\xf6\x40\x2b\x84\x61\xa5\xac\x1d\x74\x16\xb5\x59\x82\x88\x9b\x67\x47\xc6\x08\x34\xa4\x69\xc7\x19\xed\x2c\x1e\xd2\x3c\xb9\x46\x12\xb4\x2c\x91\x5d\x2a\x58\x62\x1d\x2a\x2d\x05\xa1\x87\x1f\x3f\xd1\x10\xbf\x19\x16\x58\x60\x2c\x0a\xfa\xbe\x42\x87\x20\x94\xf2\x20\xc0\xe0\x06\xc6\x90\x4e\xcd\x44\x1d\x67\x65\x63\x24\xe4\x53\xab\x21\xc0\xfb\xb9\x88\xa2\x27\xe6\xd6\x03\xe7\x7c\x76\x64\x71\xb8\x37\x2a\x9d\xd3\xf8\x44\xf0\x05\x08\x6b\xd1\xa8\xfc\xe8\x96\x33\xb0\x9e\x73\x5e\xb0\xc4\x21\x35\xce\xc0\xec\x16\x42\x88\x1e\x17\x0b\xf8\xbc\x45\x09\xb8\x45\xd9\x44\x6c\x74\xd4\xe7\xb8\x36\xf0\xd4\xa0\xdb\x9d\x6a\x2c\x72\x72\x49\x5b\x90\xb5\x21\xdc\x52\x2c\xb8\xf8\x2c\x00\x9d\xab\x5d\x74\xe3\x37\x9a\xe4\xea\x50\x07\x57\x2e\xbe\xf1\x4b\x2d\x2a\x94\x94\x17\xdd\x15\xc5\xa2\x55\xfd\x0a\xff\xb2\xbb\xbb\xbd\x3a\x1b\x3f\xef\x6e\xaf\x34\xe1\x39\x4b\x8e\x18\xe3\xfe\xa9\x1a\xd4\x14\x07\xa8\x6b\xb4\xd4\x98\xd7\x82\x97\x0e\xd7\x95\x36\x13\x80\xc2\x52\x34\x15\x4d\x62\x3a\x47\x9e\x5f\xe3\x26\x4f\x87\x4e\x0b\xe1\x1c\x1a\xe3\x1b\x1b\x3b\x05\xd5\x70\x62\x5a\xb0\x64\x9a\xea\xfb\x58\xd5\x
95\x7e\xc4\xee\xeb\x0c\x1e\x1a\x02\x2b\x8c\x96\x1e\x74\x09\x62\xcf\x86\x5a\xca\xc6\xf9\x97\x72\x0f\xc7\x92\x7f\xff\x72\xf6\x5b\x96\xe8\x32\x52\x63\xe3\x1d\x9a\x1d\x5d\x7e\xe8\x76\xbc\xbb\x00\xa3\xab\x18\x92\x74\x9a\x72\x74\x6e\xd0\xdf\xb6\xb0\xd1\xb4\x02\xdc\x12\x1a\x05\x19\xa4\x1f\x7b\x01\xe9\xac\x8b\x93\xd9\xdc\xd8\x27\x61\xe1\x9f\xaa\xc5\x30\x9d\xf8\x7e\x22\x44\x4a\xf8\x57\xf0\xfe\xb6\x5e\x83\x67\xb5\xc1\x3f\xa6\xd5\x48\x4e\xbf\x9a\xe7\x19\x55\x1b\xfc\xf6\xe2\x98\x9a\x20\x42\x7f\x97\x07\xe0\x37\xa7\x95\xd7\x66\x59\xf5\x33\xe9\xf8\xb4\x9a\x03\x9f\x07\xd6\x1b\xf7\xff\x17\x9d\x3c\x75\x3a\x00\x67\xa7\x9f\xd4\xd0\x93\x16\x9a\x03\xf9\xd1\x42\xfb\x1f\xad\x70\xa2\xf8\xe3\x0d\x71\xa2\xe0\xfb\x51\xf1\x73\x45\xfd\x0e\x00\x00\xff\xff\xa0\x2a\x70\x3c\x6f\x07\x00\x00") + +func templateBuilderDeleteTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuilderDeleteTmpl, + "template/builder/delete.tmpl", + ) +} + +func templateBuilderDeleteTmpl() (*asset, error) { + bytes, err := templateBuilderDeleteTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/builder/delete.tmpl", size: 1903, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateBuilderQueryTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x6f\x6f\xdb\x38\xd2\x7f\x6d\x7f\x8a\x59\xc3\x4f\x20\x05\xae\xdc\x64\xf7\x55\x1e\xe4\x80\x6c\xd3\x16\xbe\x6b\x9b\xdd\xa6\xb8\x5b\x20\x08\x76\x15\x69\x64\xb3\xa5\x29\x47\xa4\x9c\x18\x5e\x7f\xf7\x03\x87\xd4\x5f\x4b\x8e\xec\xf5\x5d\xaf\x40\x50\x53\x1a\x0e\x39\x33\x3f\xfe\x38\x22\x67\xbd\x86\x10\x23\x26\x10\x06\x8f\x29\x26\xab\x01\x6c\x36\xfd\xf5\x1a\x86\x8b\x6f\x53\xb8\xb8\x84\x07\x5f\x22\x0c\xbd\x37\xb1\x88\xd8\xd4\xfb\xc5\x0f\xbe\xf9\x53\xb4\x32\x0a\xe7\x0b\xee\x2b\x84\xc1\x0c\xfd\x10\x93\x81\xe9\xb5\xd9\xf4\xab\x6f\xd9\x7c\x11\x27\x6a\x00\xc3\xec\xd5\xf0\x21\x65\x3c\xc4\x44\x0f\xb0\x48\x98\x50\xe0\x2c\x7c\x19\xf8\x1c\x86\xde\x27\x7f\x8e\x2e\x0c\x7e\xad\x4c\x26\xc1\x00\xd9\xd2\x74\xc8\x7f\xe7\x5a\xb4\xd6\xf1\x18\xca\x8a\x37\x1b\x60\x12\xd4\x0c\x21\x7b\x12\xc5\x09\x90\x85\x4c\x4c\xb5\x68\x65\x40\x2d\x8f\x42\x31\xc5\x50\x7a\x7d\xb5\x5a\x60\x5d\x9b\x54\x49\x1a\x28\x58\xf7\x7b\x01\xb9\xa2\xdf\xe3\x6c\xce\x14\x9c\x32\xa1\xfa\xbd\x38\xd1\x62\x70\x77\x7f\xa3\x7f\xf4\x7b\xa9\x60\x8f\x29\xc2\xdd\xbd\x54\x09\x13\xd3\x7e\x6f\x91\x60\xc8\x02\x5f\xa1\x84\xbb\x7b\x14\xca\xfb\x25\x7b\xd0\xef\x8d\xc7\xc0\x84\xc2\x64\x8e\x21\xd3\xfe\xd2\xb3\xa4\x79\xf4\xe4\x23\x07\xfd\xef\x54\x3e\x72\xef\x16\x39\x06\x2a\x4e\xfa\xbd\x69\x82\x73\xce\x04\x9c\x86\x92\x7b\x5f\x12\x7f\x89\x89\xf4\x79\xdf\x78\xe1\x5f\x33\x4c\x10\xfc\x30\x94\xe0\x83\xc0\x27\xc8\x87\x26\x17\x94\x5c\xe2\xf5\xa3\x54\x04\xe0\x54\x1c\xbc\xd9\xc0\x69\xd5\x74\xd7\xa8\x74\x16\x12\x3c\xcf\xab\xcc\xdd\xad\xcb\x6a\xff\x94\xd5\x6d\x36\x5e\xc9\xf2\x4b\xf0\x17\x0b\x14\x61\x7d\xc4\x92\xcc\x08\x16\xd2\xf3\x3c\xb7\xdf\x4b\x50\xa5\x89\x80\x9a\xa8\x35\xf2\x03\xf9\xde\x1a\x69\x02\x21\x15\x2e\x40\xc5\x64\x20\xc5\xb9\xb3\x79\xa4\xcc\x31\x5a\x98\x50\x2f\x1a\xa5\x67\x6c\xa4\x2f\xe1\x84\x7e\xbc\x30\x5b\x02\x85\x9d\xad\x00\x83\x95\xc3\xa7\x4b\xda\x9c\x58\x07\x83\x7e\x76\x9a\xaf\x19\xb4\x35\x00\xf4\x7a\x04\xf1\x4b\xae\x5f\xaf\xc7\xa7\xa0\x66\x4c\x42\x10\x87\x08\x33\x5f\x82\x64\x73\xc6\xfd\x84\xa9
\x15\x3c\x31\x35\x03\x0c\xa7\x39\x84\x81\x09\x08\x38\xd3\x98\x51\xf3\x05\x87\xd3\xb1\x59\xcf\x89\x2f\xa6\x08\xc3\xdf\x47\x30\x44\xbd\xa4\x87\xde\xdb\x70\x8a\x52\x0f\x42\x33\xd7\x3a\x7e\x6f\xe7\x08\xf4\xbe\xac\x16\xb8\xcd\x14\x7a\x25\x51\xab\xb4\xbc\x31\x5f\xdf\xc1\xcc\x67\xc2\x70\x42\x90\x26\x09\x0a\x65\x1c\x0f\xb1\xa0\x87\x34\x70\xc1\x06\xe1\x14\xbd\x7e\xaf\x63\x4c\x5a\x47\x75\x6c\x74\x2a\x16\x99\x10\xf5\xcc\xe8\x17\x97\x70\xd2\x20\xb1\x36\x34\x73\x51\x8f\x82\x67\x9e\x6f\xfa\xbd\x9e\x7c\x62\x2a\x98\x6d\x09\x84\x89\xfe\xe5\x5d\x33\x5f\xf3\x85\xe3\xd2\x58\x81\xa6\xf1\xd0\x3c\xf2\x3e\xae\x6e\x7f\xfd\x30\xca\x9b\xb7\xbf\x7e\x60\x0a\x2f\xfa\xbd\x5e\x6f\xbd\x7e\x05\x2c\xd2\xf3\xff\x78\xfe\x11\xc8\xa7\xf4\x14\x86\x4c\x4f\xf4\x4c\x4f\x6c\x0d\xc3\xaf\xba\xf1\x9a\x1a\x99\xfc\x44\x4e\x84\xe6\x21\xb4\x22\x0c\x32\x09\x2d\x9e\x77\x45\x11\xc2\x2b\xab\x56\x9d\x69\x35\x9a\xda\xbe\xf8\x0f\x1c\x1d\x13\x00\x0a\x6d\xb1\xcf\x98\x77\xae\xe9\x70\xae\x3b\xd4\xed\x95\x8f\x9c\xdc\xef\x64\x42\x96\x29\x1d\x75\xee\xbd\x21\xa5\x65\x75\xd4\x9e\x5c\xeb\x0d\x4d\x2a\x5f\x28\x1d\x3e\xdb\xf1\xc7\xed\xe9\xd4\x7b\xa2\x79\x57\xee\x6c\xfa\xfe\x94\xf5\xcd\xc6\xfe\xb1\x65\x6c\xf4\x7e\xf9\x47\xa9\xfb\x9d\xf1\xd5\x66\x73\xef\xba\x1e\xa9\xea\xf5\xde\x25\xf1\xdc\x51\x3f\xe6\xed\xbf\xc7\x4c\x38\xea\x3c\x6f\xdf\x88\xfd\xd4\x7f\x25\xf5\x23\xd8\xd3\x1f\x86\x95\xf4\x16\x54\xb1\x2c\x9b\x86\x99\xe5\x59\xd6\x34\x93\xfc\x29\x6b\xea\x39\x9e\xd9\xe1\xb6\x63\x5a\x7a\x5a\x1b\x7a\x04\xea\xa7\xfd\x3d\x97\x41\x17\xb9\x44\x8d\xc7\x38\x31\x10\xbe\x01\xc7\x17\xa1\xfe\x7d\x73\x7e\x53\x41\xa9\x4b\x70\x1c\x9f\x82\x16\xfa\xf3\x4f\x70\xb4\x00\xf1\x17\xb3\x30\xd6\xeb\xd1\x35\x8c\xf5\x5d\xc1\x8a\xde\x9b\x98\xa7\x73\xd1\x39\x40\xda\xef\xd4\x43\xb6\x4c\xd2\xbe\xd5\x5c\x4f\x7f\x3b\xe2\x79\x7e\x84\x78\xee\x6d\x58\x1e\x49\x1b\xa2\x9b\xf3\x8f\xd5\x10\xf9\x52\xc6\xc1\xff\x4c\x80\x8e\xb7\x7a\x1a\xbd\xdd\xc5\x6d\xfb\xae\x6d\xf2\xb0\x08\x0d\xbf\x57\xb6\x86\x4f\xb8\x50\xa9\x30\x9b\x41\x96\x66\x36\x78\xc9\xbe\x2a\x79\xaa\xd8\x09\x6e\x91\x47\x9f\x31
\xca\x36\x0f\xe3\x88\x4c\xd7\x25\xd8\x5f\xde\xcf\xb1\x9a\xb5\x1a\x58\xa5\xd7\xf2\xd2\xae\x6d\x35\x3b\x87\x98\x88\xb7\x3b\x01\x5b\x1e\xc7\xbb\x49\xd5\x3f\x9d\x3a\xfe\x76\xaa\xbf\x49\xd5\xdb\x0e\x16\x78\x13\x51\x56\x9c\xb9\x5d\xff\xd9\x44\x8b\x06\xe8\xf7\x28\x35\xb2\xef\x29\x6f\x7c\x8f\x0a\x8c\x88\x4e\x73\x69\xa4\xca\xc7\xca\x0a\x1e\x56\xc0\x94\x04\x16\x76\xce\x1e\xdf\xa3\x72\x02\xf5\x0c\x41\x2c\x14\x3e\x2b\x3d\x53\xfd\xff\x08\x58\x08\x19\x5e\xb4\xb7\x48\xd8\x39\x2d\x0f\x3a\x02\x4c\x92\x38\xa1\x84\xa2\x39\x45\xf4\xcc\x97\x42\xdd\x27\x93\x6b\x87\x85\xae\xeb\xdd\x08\xbe\xd2\x83\xbb\x36\x2f\x7e\x8f\xea\x37\xfd\x99\xc6\xd9\x37\xd4\x8d\x11\x3c\xa4\x0a\x16\xbe\x60\x81\xd4\xb1\xf6\x85\x19\x11\xe2\x20\x48\x13\xb9\x8f\x8d\xbf\x75\x37\xb2\x62\x63\x96\x3b\x7b\x9f\x8b\x01\xc8\xee\xa6\x55\x60\x7d\xa9\xd5\xba\xfd\x1e\x8b\x48\xee\x87\x4b\x10\x8c\x53\xd2\x45\x96\x38\x98\x24\xae\x0e\x6e\xd9\x67\x65\xf5\xd6\x19\xef\x58\x22\x8b\x70\xeb\x84\x34\xa2\x27\x4d\x61\x67\xa2\xf4\xdd\x00\x9f\x6d\x9f\xd3\xb7\x49\xf2\x29\x56\xef\xe2\x54\x84\xf0\x34\x43\x01\x22\xd6\xdd\x79\xfc\xa4\xbf\x96\x73\x25\x4f\xbe\x84\x48\x0b\x75\x76\x28\xcd\xad\xc9\xa3\xbb\x20\xa2\xd3\x61\x9e\x26\xf4\x8d\xdd\xc5\x9b\xe6\x33\xec\xcc\xf5\xae\x38\x37\x28\x69\x70\xa9\xf5\xa1\x60\x9c\xf4\x90\x5f\x59\x04\x1c\x85\xd3\x32\x9e\x0b\x97\x3a\x13\xad\x77\x3e\x29\x39\x6b\x0d\x75\xc4\x7e\xf0\x1f\x90\x6f\x6a\x51\x6b\xd2\x7e\xf7\xfa\x7e\xa4\x15\x96\x83\x58\x60\x9a\x9a\xc7\x42\xb5\xd1\xdd\x1c\x85\xc3\x31\x9c\x87\x76\xcb\xdd\x27\x27\xf0\xc3\x44\x66\x3e\x22\x18\x1f\x88\xea\xc9\xf5\x4b\xb8\x66\xe1\x3e\x98\x66\xe1\xa1\x18\x9e\x5c\xb7\xa0\x78\x9b\x18\x8c\xc7\x0a\x38\x2f\xfd\x04\x58\x28\xe1\xee\xbe\x26\x48\x7e\x63\xa1\x34\x1d\x76\xe0\x7a\x72\x2d\xc9\xd1\xff\xdf\x0c\xea\x32\x96\x59\x28\x4b\xb8\x35\x7a\xbb\x21\xb6\xac\xcc\x86\x86\x85\xb2\x11\xa6\x93\xeb\x2a\x50\x27\xd7\xc7\x85\x6a\x9b\xb3\x6b\xfe\xd3\x26\xb2\x70\x37\x40\x8d\xaa\xbf\x08\x51\x16\x66\xe7\x31\x82\xaf\x2a\x88\x8c\xf5\x83\x97\x88\x76\x54\xec\xc5\x99\x5b\x58\x04\x22\x56\x80\xcf\x7e\xa0\xf8
\x0a\x62\x81\x59\x47\x8d\x4f\x23\x8e\xdd\x21\x9a\xed\x8f\xff\x79\x96\x3d\xdf\x9f\x65\xed\x69\xc3\x4e\xa6\x5d\xf7\x4d\x36\x79\x76\x51\x28\x79\x89\x38\x4d\x8f\xd7\x17\x07\xf1\x73\x88\x91\x9f\x72\xd5\xd2\xf9\x96\x89\x69\xca\xfd\x64\x17\xbf\x17\x88\x28\x68\x5b\xb7\x8e\xb5\x14\x48\xf3\xb1\x49\xbb\x48\xa4\xfe\x6a\xd6\xa1\x35\xd5\xe8\x79\x7b\x31\xd4\xd8\xb9\xdb\x42\xb0\x24\x7d\xd0\x22\xf8\x7e\x34\x7d\xde\x8d\xa6\x4b\x8b\x81\xa8\xba\x02\x7c\x16\xc2\xa5\x25\xdd\x32\xba\xf7\x61\xf1\x12\xae\x2b\xdd\xba\x20\x3a\x9b\x67\x09\xd9\x25\xa6\x37\xee\x3d\x2a\xba\x8f\xc3\xf3\x45\xdc\xf7\x40\x75\x4e\xe9\x57\x9c\x03\x3e\x63\x90\x2a\x94\x05\x52\xc1\x17\x61\xe9\x0b\x8a\x33\xa9\x20\x8e\x2a\x94\x64\x31\xde\xd9\x62\x4b\x9b\x0d\xd8\xbc\xbb\x6f\x25\xe9\x3d\x4e\x6a\xbb\x1d\xd4\xb6\x7c\x7e\xc9\x47\x5e\xd0\x7a\xdb\x77\x7d\x4b\x5f\xfb\x55\x5b\xf4\x6f\xe1\x56\xb2\x4a\x7a\x9f\xf0\xc9\x19\xac\xd7\xd9\x8d\xdf\x05\xa4\x42\xa6\x8b\x45\x9c\x28\x0c\xb3\x51\x07\x6e\xc1\xb0\x57\x9c\x17\x04\x7b\xc5\xf9\xb1\x10\xa8\xf5\x36\x07\xa4\x16\x8f\x43\x36\xcb\x5d\x7b\x64\x2b\xcd\x36\x8d\x60\x9d\x30\xb9\x96\x7b\xa1\xb4\x4c\xc1\xdd\x5d\x62\x09\xac\x11\xa2\x4d\xec\xf9\xdf\x07\x69\x46\xb1\x87\x82\xb4\xe8\x7f\x5c\x90\x4e\xae\x65\x01\xd2\xc9\xb5\x3c\x16\x48\xb5\xde\x36\x90\x36\xb2\xa4\x6c\x85\x64\x61\x7a\x77\x8e\x94\xd6\xbc\x37\x71\x2a\xaa\x47\x0c\x01\x3d\x89\x23\x6a\x4c\xd9\x12\xc5\x9e\x97\x91\xa4\xb2\x6d\xb3\x16\xea\x3b\xe1\x2b\x9f\xd5\xc1\x08\x2b\x6b\xd8\xc6\xd8\xeb\x83\x11\x46\x7a\x0b\x8c\x51\xf3\x58\x28\x33\xba\x9b\x83\xc1\x84\xad\x5d\x48\x6d\x50\x9a\xb0\x55\x36\xba\x2b\xba\x48\xa3\x35\xee\xed\x33\x2b\x1f\x61\x25\x29\x9d\xd8\x16\x3c\x37\xf3\x25\x20\xc7\x39\x0a\x25\xb3\x8c\x72\x9a\xf8\x8b\x59\x67\x13\x69\x84\x16\xb8\x3d\xc4\x31\xff\x4e\x78\xcb\xa7\x75\x30\xde\xca\x1a\xb6\xf1\x16\xf9\x5c\xe2\xc1\x98\x23\xdd\x05\xe6\xa8\x79\x2c\xcc\x19\xdd\xcd\x11\xd1\x01\xd1\xde\x45\x33\x60\x0b\xe8\xca\x96\x77\x05\x1d\x69\xec\xdb\x5a\xa2\x69\x12\xa7\x8b\x9f\x4b\xc5\x02\x95\xc2\x9e\x3f\x4d\xf1\x40\x04\x83\xff\x93\xef\x49\xd2\xd4\x0a\xd0\xf9
\xb3\x69\x43\x2a\x31\x04\x15\x03\x69\x82\x25\x26\x8a\x05\x28\xe1\xc1\x7c\xc8\xc4\x09\xcc\xe3\x04\x21\x62\xc8\x43\x39\x0e\xec\xed\x19\xed\x17\x4a\x7b\x35\x8e\x14\x0a\xa3\xc4\x5c\x4f\x4d\xa7\x09\x4e\xa9\xe0\x26\x15\x81\x62\xb1\x90\x23\x72\x3d\x5d\xe6\x7f\x8d\x99\x00\xe7\x1b\xae\x64\x21\xe8\xc2\x60\x04\x03\x4a\x43\xd7\xeb\x57\x46\x0b\x47\x01\x43\xef\x1d\x0d\x6a\xca\xa0\x5e\xc1\x30\xd2\x06\x32\x11\xe2\x73\xf1\xee\xb5\x7e\x3b\x1e\x9b\x48\xfb\xf3\x05\xc7\x0b\xd3\xa4\x6f\xa1\xa5\x29\x42\x32\xb5\x4b\xe3\x31\x5d\xe6\x67\x1e\x8a\x8a\xdc\x68\xad\x5b\xd9\x16\xf4\x87\x69\xde\x52\xb7\x2f\xbe\x06\xd9\x1f\xd4\xd7\x6c\x20\x9a\x4b\xfe\xf8\x2a\x63\x71\x31\x30\x7c\x12\xcf\x99\xc2\xf9\x42\xad\x06\x24\x66\x67\xd3\xb3\x95\x1f\x0d\xb5\x56\x9e\xbd\x38\xf2\x48\xab\x0d\x43\xe3\x55\x4a\x54\xbd\x4a\x21\xf9\xab\xcc\x6d\x4e\xb1\x08\x2c\x77\xb9\x56\xe4\x36\xf0\x85\x39\x9a\x3f\x59\xba\x7a\x3a\xa5\xab\x97\xae\x97\x08\x76\x56\x14\x76\x30\x75\x5c\x23\x0b\x02\xf0\x3c\xcf\x3c\xb1\x5f\xf1\x15\x0c\x9a\x4d\xdc\x80\x29\xab\xee\xa8\x09\xbc\x5c\xdd\x41\x1d\x3c\x3b\x5c\x5e\xb7\x93\x15\x94\xad\xe9\xc5\x26\x9b\x8f\x29\xdb\x39\x3a\xe1\x99\x39\x98\x3b\xcc\x06\xd6\xcb\x6f\xff\xda\x28\xcf\xf4\x2f\x2e\xce\x5e\xba\x45\x2c\x16\x38\xf5\xd4\x0b\x5c\xaf\x70\x53\x56\xf4\xac\x74\xfc\x86\x30\xb0\x5e\x1c\x14\xe1\x1a\x58\xd0\x14\xf5\x87\xbd\x4a\xfd\xa1\x9d\xda\x58\x3e\xf2\xb1\x2d\x72\xf4\x6c\x65\x61\x76\xdd\xd6\x65\x98\x56\xbd\xd6\x8e\x36\xdd\x05\x4b\x7d\x6e\x2c\x63\xac\x61\xa3\x63\x2d\x23\xf5\x7a\xf5\xb0\xea\x5a\xcb\x58\x87\xe8\x76\x41\xa3\x05\x5b\x51\xb4\x18\x09\x09\x00\x70\x77\x9f\xaf\xb8\x2e\xc5\x8a\x7b\x55\x2b\xe6\x9a\x4d\x79\x5c\x91\x86\x66\xdc\xc8\x62\x51\xd0\x68\x56\x30\x97\xdb\xbe\x95\xac\x56\xdd\x9c\xad\xeb\x9a\xed\x6e\x31\xac\xa3\x6d\xf4\x3c\xef\xaa\xa0\xe2\xb6\x05\xdd\xa4\xde\xd3\xdd\x2b\x55\x75\x4d\x12\x23\x88\xc4\x76\x55\x63\x5d\xd2\x7a\x44\x53\x97\x56\xc8\x99\xfd\x46\xac\x1a\x4b\x1f\x8b\x52\xcb\xe8\x77\x09\xca\x94\x13\x23\xc7\x25\xdf\x2d\x7d\x9e\xe2\x01\x5e\xc9\x58\x73\xfb\xde\x74\x69\x82\x1e\xf9\x01\xae\x37\xae
\x4d\x15\xaa\x39\xd6\x96\xe1\x47\x49\xb4\xb6\xb4\xca\x47\x5e\x90\xfb\xb2\x5b\xbe\xb5\xa5\xc4\x42\xb2\xaa\x68\x3b\xed\xaa\xe7\x5b\x75\x3d\x2f\x66\x5e\x7a\x80\x22\xf1\xd2\xad\x3d\xf2\xae\x3d\x82\xd6\x72\xdb\x5d\x8b\xda\x3a\x4f\xb0\x6c\x1a\xb6\xe5\x97\xb2\x43\xb6\x8e\x3f\xab\xa9\x98\xe1\xcc\x52\x21\xa7\xb2\x85\x9a\x73\xa6\xd8\xb2\x54\xcb\x19\x95\x73\x32\xa5\xf3\x31\x73\x3e\x62\xeb\x35\x8d\xc8\x66\x93\x7f\x36\x34\x1c\xd2\xe9\xbd\x1f\xa2\x24\x9e\xe7\x6b\xc1\xcb\x52\x2f\xc1\x57\xe0\x73\x1e\x3f\xa1\xbd\x1e\xcc\x4b\xbc\xf3\x65\x43\xbc\xae\x13\x39\xe2\xb7\x4a\x4d\x67\x47\x17\x67\x73\xdc\x79\xaa\xa2\x6a\xc7\x29\xa5\x6b\xe9\x06\x52\x20\xaa\x75\xe1\x6f\x70\x46\xb2\x9d\x0e\x2f\x1a\xe6\xe6\xe5\xee\x63\x92\x4e\xe0\xfd\x60\xc6\x70\xe9\x3f\x70\x34\xee\x20\x79\xed\x0e\x4a\x61\xd5\xcc\x17\x70\x66\x1c\xa1\x71\x4a\x45\x28\x59\x96\x98\x19\x61\xa6\xde\x0d\x26\x27\x0d\x38\xd9\xbe\x3d\x2a\xd7\xba\x2c\xed\xad\xcf\xa6\x5f\x09\x7f\xb1\x4a\xb2\x27\x2f\xae\x94\xc3\xe3\xb8\xf3\x2c\x46\x65\xe5\xb9\xcb\xd1\x4e\x27\x94\x41\xe1\x16\x3e\x2b\x3b\xa2\xbc\x62\x2a\x3e\xa8\xd5\xfa\xec\x4a\x3e\xea\xc9\x41\x7b\x66\x43\x92\xfb\x65\x36\xdd\x94\x67\xe9\x4d\xfb\x00\xf6\xe7\xbf\x03\x00\x00\xff\xff\x53\x3d\x5d\x54\x4c\x32\x00\x00") + +func templateBuilderQueryTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuilderQueryTmpl, + "template/builder/query.tmpl", + ) +} + +func templateBuilderQueryTmpl() (*asset, error) { + bytes, err := templateBuilderQueryTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/builder/query.tmpl", size: 12876, mode: os.FileMode(420), modTime: time.Unix(1560146912, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateBuilderSetterTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\x5f\x6f\x9b\x3e\x14\x7d\x26\x9f\xe2\xfe\x10\xbf\x09\xa2\xd6\xd5\x5e\x2b\xe5\x65\xca\xaa\xe5\xa5\x7b\xe8\xf6\x14\x45\x13\x8d\x2f\x99\x37\x0a\x14\x43\xa6\x8a\xf9\xbb\x4f\xd7\x36\x86\x38\xa4\x49\x35\xed\x0d\xec\x73\xff\xf8\xdc\xe3\x03\x5d\x07\x1c\x33\x51\x20\x84\x12\x9b\x06\xeb\x10\x94\x9a\x75\x1d\x44\x8f\xad\xc8\x39\xd6\x70\xbb\x80\x2a\x95\xdb\x34\x87\x88\x3d\x6c\xcb\x0a\xd9\x07\xbb\x63\x81\x35\x6e\x51\xec\x0d\xd2\x3d\xbb\x70\xa5\x66\x84\xaa\xd3\x62\x87\x10\x7d\xbb\x82\x28\x23\x60\xc4\xee\x04\xe6\x5c\xd2\x7e\x40\x59\x2a\x5a\xcd\xcb\x5f\x58\x43\x5c\xd5\xa2\x68\x32\x08\xff\x67\xef\x65\x08\x51\xc6\xbe\xbc\x54\x98\x38\x68\xd6\x16\x5b\xdd\x16\xc1\x20\x7c\xc0\x26\x84\xb8\xef\x31\x63\xf7\xe9\x93\x05\xdf\xdc\x80\xc3\x2b\x05\x12\x1b\x09\xcd\x77\x34\x8b\x1a\x47\xcb\x19\x35\xc2\x66\x81\x86\xc5\x07\x27\x52\x0a\xe6\x63\x2e\x94\x4a\xc6\x19\x63\xd3\xb8\x52\x36\x23\xb5\xa9\x31\x5e\x10\x74\xb3\x20\xf0\x12\x33\x13\xf2\xd0\xd4\xed\xb6\xd1\x5c\x10\x70\x01\xef\xfa\x9c\xb3\x20\xa8\xb1\x69\xeb\x02\xbc\xc8\x59\x60\x78\x10\x19\x94\x35\xe5\xf8\x5c\x35\xa2\x2c\xcc\xe1\x3f\xa5\x72\x89\x59\xda\xe6\x8d\xa3\xab\x10\x79\x9e\x3e\xe6\x78\x77\x44\xdb\xbd\xdd\x39\x41\x5f\xcf\xdf\x41\x82\x33\x3c\x52\x57\xb4\xb5\x13\x7b\x2c\x60\x9f\xe6\x2d\x82\x90\x50\x94\x0d\x14\x22\x67\xb3\xe0\x2d\x34\x7b\x85\x07\xba\xe7\x17\xf0\x1d\x88\x0c\x5c\xc0\x7f\x0b\x2a\x6f\xd6\xa7\x27\x61\x4b\xcc\xfb\x90\x84\xa0\x44\xc2\xc9\x29\xd8\x31\x60\xc1\xed\x4d\xb0\x4f\x9e\xdc\xd1\xc8\xfd\x23\xdf\xe1\xa0\xf6\x52\xcb\x3d\x4c\x39\xa7\xfb\x66\x86\x19\x21\xfb\x5a\x88\xe7\x16\xcd\x0a\x61\x16\xfa\x56\x5a\x88\x4d\xaf\xe3\x05\x97\x87\xe3\x74\x03\x2c\xab\x04\x62\x29\x8a\x5d\x9b\xa7\x35\xe5\xd4\xe3\xf9\x6d\xef\x70\x02\xe1\x6a\x29\x4f\xd7\xec\xf3\x4e\xa7\xed\x5f\xd0\x8a\x24\x5c\x2d\xbd\xde\xac\x62\xfa\x34\xf6\x66\x94\x44\xe8\x20\x19\x74\x92\x41\xbe\x43\x68\x4a\xbb\x4a\xe3\x74\x5b\x8f\x2f\x20\xb8\x69\x92\xc4\x33\x6e\x54\xba\x82\x6f\xbb\xb5\x43\x57\xf1\xf1\xe9\x75\x31\xcc\xa5\x79\x96\xc0\x18\x73
\x65\x74\x34\x5b\x2d\x5f\x17\x9c\xd5\x9b\xaf\x2c\x3c\xba\xe3\x23\x2d\x5e\x16\x00\x4f\xe9\x4f\x8c\x9f\xd2\x6a\xed\x35\xb2\x91\x1a\xd9\x69\xb5\x2a\x63\x32\x07\xe7\xba\xd6\x42\xbd\xa8\xcc\x5a\xf0\x0d\x2c\xa0\xcf\xd8\x99\x74\xd7\x86\x13\x9b\x27\x2b\x6b\x10\xda\xe7\xb5\xbe\x89\xa7\xd3\x57\x6a\xa2\x80\x5c\x8b\xcd\x51\x91\xc0\x55\xb2\x22\x3a\xef\x7b\x69\xc1\x47\x87\x8c\x70\xb0\x40\x65\x59\x70\xde\xb1\x5a\xca\x8b\x8c\xcf\xd3\xf4\xb1\xfb\xf5\x89\x7c\x03\xbc\x5c\xcd\xff\xc4\x1b\x87\xb6\x62\xc1\x0d\xf4\xac\x52\x49\xaa\x82\x9f\x36\x45\xad\xb9\xa9\x91\x8e\x6e\xd0\x5c\xf0\xb7\x5a\xe4\xb9\xef\xfc\x88\x33\xf7\xb1\x17\x19\xe0\x33\x45\x1d\x30\x62\x12\x2d\x20\xdc\x87\xf6\x75\x5c\x22\x7b\xc5\x19\xfd\x71\x4f\xfc\x26\x9c\x75\xac\xae\xf3\x4d\x69\xec\x49\xd3\x0a\xf8\xfb\xff\x8b\x09\x23\x1c\x7b\xd4\xdc\xab\xf9\xca\x6f\xc8\x94\x43\x4c\x4f\xd0\x1f\x7a\xdf\x0f\x5b\x2d\x93\x09\x7b\x20\x3f\xb8\xb5\x6e\xb5\xde\x78\x52\xbc\x82\x1c\x0b\x97\x21\x49\x7a\x5f\xd2\x7e\x12\x8a\xe1\x9b\x44\xf3\x16\x06\x65\xf6\x17\x10\xfe\x18\x7d\x67\x46\x5e\x64\xf6\x95\x1a\x2c\x69\x60\x4c\xab\x9a\x2c\xa7\x07\x6d\xac\xa8\x69\x7b\x58\x64\xab\xe5\x19\x19\xfb\x24\x08\x2e\x19\x63\x89\x67\x5a\xe3\x3f\x80\xe1\xe9\x4f\x00\x00\x00\xff\xff\x7d\xea\xb3\xe5\x5f\x0b\x00\x00") + +func templateBuilderSetterTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuilderSetterTmpl, + "template/builder/setter.tmpl", + ) +} + +func templateBuilderSetterTmpl() (*asset, error) { + bytes, err := templateBuilderSetterTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/builder/setter.tmpl", size: 2911, mode: os.FileMode(420), modTime: time.Unix(1557131143, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateBuilderUpdateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x59\x5b\x6f\xe3\xba\x11\x7e\x96\x7e\xc5\x54\xd0\x39\xb0\x82\x84\x3e\xe7\x35\x85\xfb\x70\x4e\x76\x81\x05\xb6\x7b\x4b\x77\xbb\x68\x36\x58\xd0\xd2\xc8\x66\x23\x53\x0a\x49\xc5\x49\x5d\xfd\xf7\x82\x37\xdd\x2c\x3b\xc9\x22\x45\x2f\x4f\xb1\x78\x19\xce\xcc\x37\x33\xdf\x90\xd9\xed\x20\xc3\x9c\x71\x84\xa8\xae\x32\xaa\x30\x82\xa6\x09\x77\x3b\x88\xab\x9b\x15\x9c\x2f\x60\x49\x25\x42\x4c\x7e\x2f\x79\xce\x56\xe4\x03\x4d\x6f\xe8\x0a\xdd\x1a\x85\x9b\xaa\xa0\x0a\x21\x5a\x23\xcd\x50\x44\x76\x57\xd3\x84\xc3\x59\xb6\xa9\x4a\xa1\x22\x88\xfd\x54\xbc\xac\x59\x91\xa1\xd0\x07\x54\x82\x71\x05\xb3\x8a\xca\x94\x16\x10\x93\x77\x74\x83\x09\x44\x9f\x87\xda\x08\x4c\x91\xdd\xd9\x1d\xed\xef\x56\x4c\xd3\x84\xf3\x39\xf4\x05\x37\x0d\x30\x09\x6a\x8d\xe0\x47\xf2\x52\x80\x31\x91\xf1\x95\x59\x6a\x4e\xd2\x0b\x91\x2b\xa6\x18\x4a\x12\xaa\x87\x0a\xc7\x62\xa4\x12\x75\xaa\x60\x17\x06\xa9\xf1\x41\x18\xec\x76\x67\x3d\xe3\xac\xdb\xe6\x39\xc3\x22\x93\xda\xc6\xb3\xa6\x09\x83\x4a\x60\xc6\x52\xaa\x50\xc2\xd5\x35\x72\x45\x3e\xf8\x81\xb0\x09\xb5\xb2\x7f\x5d\xa3\x40\xa0\x59\x26\x81\x02\xc7\x2d\xb4\x3b\x8c\xa6\x3d\xcd\x49\x98\xd7\x3c\x85\x59\xdf\x0d\x4d\x03\x27\x43\x3d\x13\x2b\x71\x56\x49\x20\x84\x0c\x4e\x4c\xc6\x6b\xb5\x31\x43\x69\xa4\xa7\xef\x02\x68\x55\x21\xcf\x66\x07\x97\x9c\x42\x25\x09\x21\x49\x18\x08\x54\xb5\xe0\x30\x40\xa8\x69\x42\x8b\xf2\x96\xa9\x35\xe0\xbd\x42\x9e\x41\x0c\xd1\x6f\xf6\xfc\x68\x00\x5b\x30\x08\x14\x89\x4a\xe9\x15\xc4\xc1\xae\x77\x36\x3f\x2a\xcc\x01\x83\xd9\x0a\xe5\xbe\xc8\xf9\x1c\x2e\xe9\x1d\x02\xde\x63\x5a\x6b\xb3\xb5\xc7\x6f\x6b\x14\x0f\x40\x79\x06\xd6\x30\x3b\xca\xeb\xcd\x12\x05\x94\x39\x88\x72\x2b\xe7\x77\x28\x14\x4b\x51\xc2\x86\xaa\x74\x8d\x19\x2c\x1f\x40\xad\x99\x84\xb2\x42\x41\x15\x2b\xf9\x14\x62\x30\x05\x99\xd6\x60\x96\xaa\x7b\x48\x4b\xae\xf0\x5e\xe9\x34\xd3\x7f\x13\x98\x31\xae\x4e\x01\x85\x28\x45\xe2\xe0\x1a\x79\xe0\x93\x13\x1c\xf5\xce\x88\x5c\x7e\xba\x44\x8c\xfe\x86\xa2\xfc\x42\x8b\x1a\x23\xf8\xc5\xc6\xe5\xa4\x8b\xee\x68\xc1\x32\xaa\x4a\xe1
\xfd\x64\x42\x5c\x9f\x63\xf6\xc8\x2d\x53\xe9\x7a\x0c\x32\xc9\x84\xfe\x45\x2e\x18\x2d\x30\x55\x33\xa3\x66\xaa\xab\x45\x66\x47\xc8\x9f\x1f\x2e\x3f\xbe\x3d\x6d\x3f\x2f\x3f\xbe\x65\x0a\xcf\xc3\xe0\x40\xd4\x10\x79\x5b\x78\x87\x24\x23\x51\xef\xb0\x52\x35\x37\x9b\xbd\xfb\x8d\x77\x74\x39\x18\x8b\x59\x09\xdc\x14\x8c\xf7\x44\xf9\x03\x0b\xe4\x33\xbf\x3d\x31\xfb\xc3\x20\xc3\x9c\xd6\x85\xea\xa9\xf5\x8b\xf3\xbb\x24\xef\x70\x3b\x8b\x7c\x2d\x6c\x9a\x73\xa8\xb9\xac\x2b\x5d\xcb\x30\xf3\xaa\x45\x49\x18\x34\x61\x17\x50\x5f\x75\xdd\x29\xd8\x0d\x9a\xaf\x53\x58\xd6\x0a\x2a\xca\x59\x2a\x81\xe5\x40\xb9\x95\x0d\x65\x9a\xd6\x42\x3e\x2b\x50\xbe\x4e\x47\x8a\xae\x9e\xbb\x30\xa0\x79\x8e\xa9\xc2\xec\xa0\x5b\x7a\xfe\x60\xb9\x59\xf4\x87\x05\x70\x56\xe8\xcd\x81\xd1\x70\x86\x42\x18\x6b\xbc\x27\xbc\x4c\x67\xde\xab\x7b\x4c\x27\xf2\xe5\xc9\x46\xe8\xfd\xd3\x36\x58\x9f\xec\xc2\xe0\xfb\x53\xd4\x77\xda\x69\xf8\x3a\xc5\x3a\xbf\xeb\xaf\x97\xf2\xbb\x91\x3c\xad\xf3\xae\xf5\xe3\x84\xb6\xde\xd4\xe4\x8f\xc7\x3d\xfd\xc4\xda\x36\xca\xeb\xbd\x52\xe7\x62\x71\x2e\x6f\x8b\xb9\xa7\xf1\x17\xa9\xa1\x5e\xb0\xcb\xa9\x63\xc2\xe3\x92\xe3\x98\xd6\x73\x88\x7e\x92\xef\x39\x0e\x8f\x18\x38\xab\x4f\xe7\x3d\x09\xae\x44\x8f\xe4\x3e\xc2\xea\x14\x24\xe3\xab\x02\x27\xe8\xfd\xa1\x47\xee\x43\x81\xfb\xfc\xce\x32\x2b\xe0\xcd\x05\xf9\x8b\xde\xe3\x0b\xe2\x11\xce\x7f\x9c\xf3\x86\xb6\x3d\x8d\xf6\x7e\x58\xe0\x8b\x51\x9f\x15\x94\xb5\x3e\x3c\x92\x36\x43\xaf\x1e\xe5\xb6\x93\x3e\x3e\x2f\xca\x72\x11\x67\x45\xf4\xff\xc2\x74\x07\x36\xef\xf3\xdb\x3e\x87\x71\x56\xfc\x97\xb0\xd8\x20\x2c\x8e\x12\xd9\x20\x2a\x7c\x8b\x4a\x3e\x75\x02\x5f\x92\xda\xc6\xb2\x8f\x53\x1c\x94\xdc\x7c\x3c\x37\x0d\xfe\x67\x38\x6f\x42\xeb\xff\x20\xed\xf5\xb4\xf9\x77\x33\xdf\xb1\x2a\xfa\x0c\xf2\xeb\x7e\xce\x4f\x40\xae\xa9\xc0\xcc\x13\x8b\x25\x09\x58\xa2\xda\x22\xda\x30\x52\xdb\xca\x55\x56\x21\xe1\x64\x6e\xa5\x0d\xaf\xe1\x9e\x5b\xec\x9c\xa0\x7c\x85\x10\x7f\x3f\x85\x38\xd7\x7e\x8f\xc9\x6b\x2b\xd5\x57\xb0\x38\x27\x97\xe6\x38\x33\xde\xc2\x9a\xb7\x04\xd6\xe9\xa8\x97\xf7\xe4\xa1\x95\xf7\x4a\x53
\x45\x27\x0e\xc7\xe2\x36\xb4\xba\x1a\x91\xe2\xb5\x35\x70\xf7\x6c\xe1\x95\x1e\x8d\x04\x6e\xca\x3b\xcc\xf4\x0d\x7f\xb7\xd3\xf1\x1a\x23\xf9\xcc\xd9\x6d\x8d\x76\x24\xae\x60\x01\x51\x5a\xa0\x76\xa6\x5b\xe5\x4e\x31\x62\xec\xbb\x41\x5c\x75\x4f\x07\xe8\xde\x0e\x74\xf5\xd8\x93\xb8\x2c\xcb\x42\x4b\x28\xa4\xfe\x3a\x66\x4e\xdf\x9a\x96\x1a\x0e\x60\x6c\x18\x16\x8c\x29\xb4\x18\x61\x5c\x3e\x8a\xb1\xe3\x67\xd7\x12\xf5\x9b\x27\x6b\x11\xb9\x4c\xcb\x0a\xc9\x6f\x07\x5a\xa7\x43\x4f\x21\xa3\x88\x99\x00\x61\xec\x1c\xcb\x96\xb1\x29\x10\xed\x93\x4c\xf4\xbb\xf6\x7d\x34\xe5\xdf\x30\x08\x5c\x73\x66\xb6\x34\x0d\x18\x9c\x6c\xc5\xd4\xc3\xd8\x75\x5f\xd9\x0a\x41\x95\x6e\x54\xfb\xda\x4f\x91\x30\x08\x9e\xd8\x86\xf7\x4e\x9a\x4d\xbe\x66\x04\xc1\xb8\x14\xb9\xc0\xd9\xed\x60\xa8\xbe\xde\xb0\x00\x25\x6a\xd4\xbb\x0e\xbc\x5f\x04\x81\x75\x93\x8b\x16\xe7\x1e\x13\xb7\x45\xb9\x45\x01\xb3\xb6\xbd\x25\xbf\xea\x06\xb0\x67\x59\xe2\x37\xcc\x4f\xb4\x9b\xcd\x03\x82\x3e\xb7\xb4\xbf\x2b\x2a\xe8\x06\x15\x0a\x5d\x5a\xf3\x82\xa5\x4a\xda\xea\xa4\x27\x5b\x1d\xcc\x0e\x13\x35\x81\x83\x0b\x6f\xb5\x02\x03\x37\xb5\x49\x72\x17\xb9\x4f\x9f\x1f\x46\x5d\x96\xc9\xd7\x43\x40\x3f\x99\x94\x8b\x60\xa6\xdb\xe4\xba\xa0\xa2\x75\xca\x3f\x9d\x97\x12\x88\xde\x5c\xd8\x90\x6c\x21\xf6\x72\x9a\xc6\x06\x3a\x3e\x0f\x66\x58\x3e\x00\xcb\xe4\x33\xd1\xee\x0e\x9d\xb1\xcc\x3c\x69\x8d\xd2\xf5\x40\x18\xb0\x7c\x8f\x94\x5c\xa1\x99\x8e\x84\x8e\xa1\xf6\x43\xe8\xe8\x46\xd8\xd0\x1b\x9c\x1d\xab\x23\x89\x16\xaa\xfd\x18\xe8\x4b\x0a\x33\xe9\x6a\xb2\x52\x1b\xf4\xec\x13\xaf\x58\x26\xaf\xd8\xf5\x35\x2c\xa0\x2d\x54\x4d\x7b\xc2\xb1\x38\x9e\x4a\xed\x36\x12\x9e\x92\xdb\x1e\xf5\x7d\xc4\xe5\x8b\x66\xb6\x8d\xe7\xa6\xd1\x68\x9f\xec\x4b\x3d\x84\x78\x26\xb5\x61\x06\x8e\xab\xeb\x11\x18\xa7\xe6\xcd\xc7\x0b\x4e\x12\x5f\x29\x0c\x1a\x11\xeb\xc8\x47\xa7\x17\xb3\xab\xec\xfc\x02\xa2\xbf\xf7\x58\xc7\x5e\x29\x0c\x92\x76\xbe\x69\x3a\x40\x5b\xc5\x2d\xaa\x1a\x29\xbf\x48\xe3\xe5\xa7\xbb\x41\xf2\xe6\xe2\x11\xe8\xc8\x7e\x12\xd8\x17\x57\x5f\x99\x06\x0c\x75\x98\xa0\x7c\x37\x23\xcd\x4d\xb9\xbb\xfe\x90
\x43\x9c\xd4\xbf\x21\x39\x42\xf7\xff\x0a\x70\x74\xe4\xff\x07\x70\xe6\xa7\xff\x81\xa2\xec\xcd\xb7\xd7\xb1\x76\x7f\x9f\xb1\xdc\xa2\xb6\xf5\x3e\xdb\xef\x1a\x86\x2d\xce\x99\xa3\x2c\x53\x24\xe3\x9c\x7c\x69\x15\x74\xa0\x4c\xa4\xbc\xed\x7d\x46\x2d\x4c\xaf\x1b\x1d\xb5\xb0\xbd\x7f\x6b\xb8\xad\xed\x21\xda\xf7\x27\x4f\x11\xbf\xdf\xf2\x0e\xa0\x35\x3e\xd2\xf1\x98\x6f\x14\x79\xa5\xdb\xf2\x7c\x78\x29\x6b\xfd\x0e\x39\x65\x05\x66\x06\x2f\xd3\x04\xc2\xb7\xc8\x1e\xe8\xf2\xe0\x5b\x74\x0e\x3f\xdd\x45\xe6\xd6\xd0\xd6\x98\x2e\x2a\xce\xc6\x8d\xcb\xe3\x1d\xd9\xa0\x1b\x68\x9d\xea\x33\x67\x6c\xf9\xb8\x37\x4c\xe0\x4f\xf0\xab\xb5\x78\xca\xe0\x43\xb7\xd0\x4d\x5d\x28\x56\x15\x08\x54\x4a\xb6\xe2\x1b\xe4\x4a\xea\x7b\x16\x85\xda\x2a\x62\x28\xc5\xda\x8e\x9d\xed\x51\xd2\x55\x35\x96\x03\x2f\x95\x9e\x7e\x5f\x29\x56\x72\x5a\xf8\x3c\x9d\x88\x89\x63\x0d\xc1\xcf\x3f\x4f\x86\xd0\x5e\x17\xbc\x78\x0c\xdd\x43\xc6\x9a\xc3\xed\x13\xd5\xe3\xd6\x79\xf3\xda\xba\x73\x08\x59\x9f\xf1\xff\x0a\x00\x00\xff\xff\xe9\x15\x2a\x17\xc5\x1b\x00\x00") + +func templateBuilderUpdateTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuilderUpdateTmpl, + "template/builder/update.tmpl", + ) +} + +func templateBuilderUpdateTmpl() (*asset, error) { + bytes, err := templateBuilderUpdateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/builder/update.tmpl", size: 7109, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateClientTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x58\x4b\x73\xdb\x36\x10\x3e\x93\xbf\x62\xcb\x71\x3a\xa4\x47\x81\x6a\x27\x27\x75\x7c\xa9\xe5\xce\xa8\x8d\xad\x38\x76\xd3\x43\xa7\x93\x81\x49\x90\x42\x42\x81\x32\x08\xd9\xf2\x30\xfa\xef\x9d\xc5\x83\x0f\xbd\x4c\x3b\x3d\x59\x78\xec\xee\xb7\xdf\x7e\x58\x80\xae\x2a\x48\x58\xca\x05\x83\x20\xce\x39\x13\x2a\x80\xf5\xda\xf7\xab\x0a\x8e\x16\xdf\x32\x18\x9d\xc1\x1d\x2d\x19\x1c\x91\xf3\x42\xa4\x3c\x23\x1f\x69\xfc\x8d\x66\x0c\x37\x55\x15\x28\x36\x5f\xe4\x54\x31\x08\x66\x8c\x26\x4c\x06\xc6\x0a\x3d\xf0\xf9\xa2\x90\x0a\x42\xdf\x0b\xf2\x22\x0b\x7c\xdf\x0b\xd0\xe9\xb6\x9f\xe1\x9c\x67\x92\x2a\x16\xf8\x5e\x55\x81\xa4\x22\x63\x70\xf4\x65\x00\x47\x02\xa3\x1f\x91\xab\x22\x61\x25\xba\xf4\x8c\x07\xb1\xc3\x85\x99\x6f\x26\xb4\xaf\xb7\xc0\x44\x82\x86\x91\xef\x0f\x87\x70\xae\xd3\x03\x5e\x82\x9a\x31\x30\xc9\x82\x9a\x51\x05\xb3\x22\x4f\x4a\xa0\x79\x0e\x38\x75\xb7\xe4\x79\xc2\x64\x49\x7c\xf5\xb4\x60\xce\xac\x54\x72\x19\x2b\xa8\x7c\x2f\xd6\xd1\x7d\x6f\x38\x84\x9b\x78\xc6\xe6\x74\xc3\x65\x5a\x48\x88\x25\xa3\x8a\x8b\x6c\x00\x26\x39\x2e\x32\xa0\x22\x81\x44\x16\x8b\x05\x0e\x4a\x6d\x49\x7c\xcf\xba\x38\xb6\x24\x10\x33\x3e\x48\xc5\x5b\xcd\xc5\x70\x08\x26\xeb\x2b\x3a\xc7\x94\x77\xa0\xe0\x42\x31\x49\x63\x1d\xfd\x91\xab\x99\x5e\xef\x1a\x35\xc9\x7a\x5e\x77\xe5\xb8\x33\x34\x2c\x68\x58\x96\xd4\xb5\x26\xf5\x8a\x3d\x5a\x82\x74\xca\xac\x04\x0a\x82\x3d\x3a\x14\x86\xab\xa5\x64\x49\x03\x20\xe3\x0f\x4c\x40\xb1\x50\xbc\x10\x25\xf1\xd3\xa5\x88\x1b\x37\x61\xb1\x50\x25\x10\x42\xa6\x7a\x3d\x82\x63\xeb\x1e\x89\x47\x12\x8c\xc7\x2a\x2f\xb2\x11\xe4\x45\x46\x3e\x4a\x2e\x54\x2e\xd6\xbe\x17\x13\xeb\x53\xfb\x20\x84\x44\xbe\x27\x99\x5a\x4a\x01\x3f\x1b\x27\x95\xef\xd9\xea\x8d\x20\x1e\xf8\x9e\x25\x7f\x04\x8e\xfc\x2b\xf6\x68\xa6\xc2\x98\x24\x92\x3f\x30\x19\x0d\x0c\x31\x87\x6b\xd1\xa5\x6e\x84\xe9\xec\x60\x2f\x8c\x9d\x37\xa4\x50\x5b\x3a\x1a\x6f\x57\x60\xa0\x3a\xfe\x94\xa4\xa2\xc4\xd2\x15\x82\xe6\x96\x4d\xcb\x55\x18\x3b\x52\x22\xb8\x5d\x85\xb1\x5a\x21\x29\x8a\xad\x14\x1e\x0c\xfc\x1b\x41
\x78\x7c\xbb\x1a\x00\x93\xb2\x90\x11\x52\xc7\x53\xf8\x32\x80\xe2\x9b\x66\xd0\xa6\x46\xc2\x63\xb5\x1a\x9b\x2c\x7f\xc5\x35\xa4\xc7\x12\x26\x78\x3e\x80\x74\xae\xc8\x05\xba\x48\xc3\xc0\xf5\x03\xcc\x2e\xa6\x42\x14\x78\x22\xa8\x54\x40\xdb\x50\x75\x91\xb9\xe8\x4e\x06\x11\xe6\xe9\x29\x03\x08\x11\x08\xf6\x68\x80\x0f\x6a\x30\x91\xc6\x88\xeb\x3f\x9d\x61\xf4\xde\x60\x34\x0a\x7d\xbc\xda\x31\x47\xf0\xe6\x21\xd0\xf1\x4c\xf0\x38\xcd\x5a\xe2\x31\x21\x47\x80\x00\xb4\x90\x62\x92\x17\xd9\x00\x1e\x98\xbc\x2b\x4a\x86\x63\xfb\x73\xdd\x48\xe8\x76\xd5\x91\x4f\x9a\xfd\xaf\xca\x48\xb3\x6d\x6d\x0c\x30\x73\xdf\x34\xe3\x43\x51\xd0\xa1\x3d\x6f\xa3\x33\x58\xe0\x81\xa8\x23\x04\xe7\x4d\x53\xb7\x1d\xc3\x6e\x35\x1d\x83\xb6\xfb\xc5\x76\x7b\x70\x6d\x4a\x77\xc2\xae\xf1\x56\x43\xb4\xb7\x86\x64\xfa\x9c\x1e\x09\xf2\x89\xc5\x0c\x89\x86\xf5\xba\xaa\x00\xcb\x7b\x6f\x96\x83\x38\x30\x73\x7a\x64\x21\xa7\x10\xbc\x21\xa7\x65\x50\x87\xff\x0e\x79\xf1\xe8\xac\x6d\xd3\xb1\x2d\xa7\x8b\xa4\x39\x39\x07\x73\x49\x65\x31\x6f\x35\x20\x83\xba\xe9\x3f\x1d\x9f\x61\x6c\xd7\x23\xd3\x07\x9b\x60\x55\xa3\x88\xce\x42\x55\x2b\xc3\x9d\xe9\x73\xdd\x10\xdb\xe8\xcc\x84\x6d\xb9\x1a\x65\x07\x61\xeb\x7c\x77\x5c\x47\xd6\x55\x18\x6d\x36\x65\xe3\x70\x03\xd2\xc6\x72\x03\x8c\x98\x5f\x0e\xdf\x5f\x8b\xa4\x83\x4f\xc0\xd2\xcc\xbc\x02\xa0\xf1\xb5\x05\xd0\x86\xd8\x07\xd0\x2c\x3f\x03\x70\x2a\x9e\xc3\xd8\xd4\x94\x09\xc5\xd5\xd3\x73\x30\xa7\x82\x85\x4e\x7c\x5b\x17\xdd\xee\x14\x10\x44\x93\x45\x4c\xea\xd9\xc9\xb8\xe5\x8a\x4c\xc6\xd1\x26\xf6\xc9\xb8\x37\x7a\x9e\xf4\x40\x3e\x19\x87\x3c\xb1\x65\x99\x8c\xc9\x2d\x1e\xcc\x7e\xa8\x77\x71\x3f\x15\xdb\xf4\x0f\x80\x27\x23\xe0\x89\x2b\xc3\x98\xe5\xac\xa3\xe3\xc4\x4c\xbc\x42\x26\xc6\xd5\x96\x4c\x6c\x84\x7d\x50\xcd\xf2\x5e\x99\x98\xe5\x8e\x4c\x76\x41\xec\xaf\x92\xda\x61\x7f\x95\x34\x18\xda\x2a\xa9\x67\xf7\xa9\xa4\xb5\xa1\x2f\xf8\x43\x22\x69\xc7\xeb\x21\x92\x5d\xa0\x77\x31\xaf\x45\x42\x5c\xed\xc8\xdf\x33\x26\x0d\x35\xed\x17\x37\xd1\x31\xa3\x68\x6f\xf7\xbb\x5f\x32\xf9\xf4\x1a\xd1\x5c\xa3\xe1\x96\x66\xf4\xec\x5e\xe0\x7a\x75
\xb7\x62\xba\x97\x29\xb3\x77\xd5\x45\x92\x99\x8f\x0c\x74\xe3\x40\xd6\xb7\x69\xb8\xa0\x65\x4c\x73\x38\x62\x9a\x4b\x1d\x25\x82\x40\x47\x71\x57\xab\x09\x59\x41\xb3\xd5\x5d\x3c\x98\x39\x67\x65\x7d\x25\x35\x2b\x2c\xc9\x18\x14\x29\xd0\x17\x71\xb1\x2b\xc8\xb3\x52\x75\x39\x99\x2b\xcc\x14\x63\x74\x66\x68\x6b\x65\x75\x80\x3b\xaf\x7c\xe4\x2a\x9e\x35\xef\xc7\x31\xa7\x39\x8b\x55\xa8\x5f\x98\x5e\x8c\x9f\x89\x89\x99\x22\x97\x4f\x37\xd7\x1f\x06\xf5\xf0\xe6\xfa\x03\x57\x6c\x84\xef\x21\x9e\x60\xd8\xd6\x69\xc0\xef\x34\x9e\x5a\xa5\x4e\xca\x1b\x25\xf1\x35\xb7\x5e\xf3\x24\x8c\xf0\xe2\xcf\x4b\x04\x35\x19\x37\x8f\x00\xfd\xaa\x32\x46\x8c\x5c\x9e\x5e\x82\x9d\xd4\x6f\x2d\x8e\xee\x4f\xec\xf3\xe2\x2b\x0e\x7e\xd1\x03\xb7\x7f\x52\x4e\xc4\x03\x93\xda\xa9\xd9\xef\x76\xe0\xf6\xda\xb4\x7e\x85\x79\x9e\xa7\x4e\xd0\x4d\x79\x9f\x93\x5b\x7a\x97\x1b\xe9\x5b\xca\x5a\xfa\xd7\x6b\x91\x31\x38\xdd\x36\x10\xfb\xf6\xbe\x7b\x66\xaf\x8d\x86\x8b\xe7\x85\x28\x15\x35\x52\x30\xc6\xef\x9d\xf1\x0d\xd3\xa5\x50\xef\xc8\xf9\x3e\x17\x1f\xff\x6c\xd9\xff\x63\x72\x5f\xaf\xff\x8d\x22\xa2\x7d\x79\xde\xef\xb2\x98\x87\xea\x5d\x3d\xfe\xa3\xe0\x22\x54\xa7\xf5\x78\x2a\x5e\xe8\xff\xab\xf6\x3f\x00\x75\xba\xcf\x4a\x17\xbd\x9d\x56\x1d\xcc\x74\x19\x4c\xed\xe2\x3a\x7c\x81\x03\xbc\xbd\x22\xc3\x8e\x16\x39\x29\xef\x73\xe8\x70\xe4\x42\x98\x74\x4f\xdc\xd0\x64\xfb\xde\x0d\x31\xd9\x13\x1b\x75\xbb\xd8\xad\xd9\xad\xf8\xea\xfd\x2b\x6a\xe0\x44\xad\xe5\xce\x53\x28\xa4\x11\xf7\x14\x42\x2a\x12\xfc\x3d\x3d\x9d\x76\xf4\x1b\x69\xa1\x0e\x8f\x01\x37\x7d\xff\x0e\x21\x6e\xd0\x1f\xd9\xdc\x0a\x1c\xbb\x4b\x04\xc7\xc3\x1f\x97\xb1\x65\x6e\x4f\x52\xe7\x45\xbe\x9c\x8b\x36\x09\x6d\x86\x5f\x2e\x6d\x67\xde\x91\x40\xaf\xea\xf7\x29\x7e\x5d\xf6\x5a\xde\x3f\x50\xea\xbd\xba\xdc\xc5\x4a\xb7\xc8\xb6\x7a\xd3\xd3\xcb\x6e\xf5\x68\x59\x16\xf1\x66\xed\x0e\x67\xd3\xaf\xa6\xfd\x69\xdd\x01\xbe\x61\xb6\xf5\x7f\xb5\x8d\xa6\x7f\xc5\x16\x6a\x29\x4c\x9b\x6f\x1a\xee\x0d\xcb\xd3\x4f\x2c\x85\x4e\x26\x99\x64\xf3\x9c\x0b\x38\x83\x8c\x7c\xde\x78\x1b\x91\xdf\x0a\x35\xdb\x0f\xac\xdb\x02\xdb\x67\x66
\xa3\xbb\xf7\x0d\x37\x11\x17\x07\x4b\xdf\x11\xf6\x74\xa9\x3e\x87\x9b\x85\xec\x1b\x69\xba\x54\x17\x7d\x12\x23\x13\xd1\x0e\x62\xc9\x6e\xfe\x17\xa1\x43\xf9\xfa\xad\xe2\xbe\x8b\x9b\x9f\xad\xc9\xff\x02\x00\x00\xff\xff\x81\xbc\xee\x4b\xdc\x15\x00\x00") + +func templateClientTmplBytes() ([]byte, error) { + return bindataRead( + _templateClientTmpl, + "template/client.tmpl", + ) +} + +func templateClientTmpl() (*asset, error) { + bytes, err := templateClientTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/client.tmpl", size: 5596, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateConfigTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x53\xbd\x8e\xdb\x3c\x10\xac\xc5\xa7\x58\x18\x2e\xe4\x0f\x07\xba\xff\x00\x57\x49\x79\x40\x52\xa5\x0d\x28\x6a\xc5\x63\x4e\xe6\x2a\xe4\xea\x90\x03\xa1\x77\x0f\xf8\x77\x52\x80\x2b\xdc\x59\xdc\x99\xd9\xd9\xd9\x75\x8c\x30\xe2\x64\x1d\xc2\x49\x93\x9b\xac\x39\xc1\xb6\x09\x11\x23\x9c\x97\x57\x03\xff\xdf\x60\x50\x01\xe1\x2c\xbf\xe4\xaa\xfc\xae\xf4\xab\x32\x98\x40\x31\x02\xe3\x7d\x99\x15\x23\x9c\x5e\x50\x8d\xe8\x4f\x85\x55\x15\xf6\xaa\xbd\x2f\xe4\xf9\x04\xe7\x5c\xba\x5e\xe1\xdb\xc2\x96\x1c\x4c\xab\xd3\xf9\x07\x13\x94\xf6\xab\x47\xe0\x17\x04\x3d\x5b\x74\x2c\x05\xbf\x2f\x78\x44\xf7\xff\x15\xdc\x25\xcb\x14\x53\x60\x43\xe1\x54\x05\x55\xd0\xe4\x0f\x4a\xa0\xdc\x08\x96\x03\x0c\xab\x9d\x47\xf4\x55\xb9\x50\x20\xb0\x5f\x35\x43\x14\xdd\xf5\x0a\xa3\xb7\x6f\xe8\x9b\x68\xfd\x5a\x03\x8e\x59\x12\xff\xa0\x5e\x19\x61\x54\xac\x72\x34\x1e\x7f\xaf\x18\x38\x48\xd1\x55\xec\x68\xd5\x8c\x9a\xe5\xd7\xfc\x99\x35\xdf\xd0\x0f\x14\x10\xd0\xa9\x61\x46\x50\xf5\xc1\xf2\x3b\xcc\x64\x8c\x75\x46\x8a\xae\x81\x06\xa2\x39\xb3\x66\x32\x7b\xe3\x8a\x03\x72\x1f\x6a\x77\x1a\x51\x8a\x2e\xc1\x72\x36\x52\x4a\xeb\x18\xfd\xa4\x34\xc6\xed\x22\x8e\x59\x07\x50\xcb\x32\x5b\x2c\x53\x51\x7d\x4b\xd1\x7f\x24\x07\x34\xfc\x4a\xbe\x45\x12\x83\x5e\x43\xcb\xba\xc1\x7b\x5a\x38\
x80\x94\xb2\x48\x5e\x52\x60\xc9\xda\xcf\xa7\x84\x48\xc7\xe2\x95\x33\x59\x3d\xa4\x5a\x47\x0b\xf7\xfa\x22\xba\x4d\x74\x76\x02\x2d\x9b\xf1\x54\xd3\xb2\xc6\x75\xdb\x03\xc3\x61\x35\x7d\x2b\x3c\x81\x96\x33\x99\x4c\x2f\x93\xfc\xa8\xf4\x80\x1c\x8e\xbb\x6d\xd1\x30\xb5\x68\xea\x0c\x95\xd0\x5f\xda\x09\x45\xd1\x79\xe4\xd5\xd7\x63\x3a\x8c\x58\x2c\x35\x83\x37\x60\xbf\xe2\xde\xf9\x99\xcc\xe3\x5d\x9f\xc9\xf4\x93\xfb\x74\x25\x0f\x1b\x49\x3b\xbd\xc1\xe4\x76\x0b\xe5\x9a\xf6\xbf\xc9\x3f\x5e\x4a\x62\xb5\x7f\x41\xf6\x9f\x5e\xe3\xc3\x06\xf6\xe5\xd4\x2b\xce\x3e\x62\x04\x74\x23\x6c\xdb\xdf\x00\x00\x00\xff\xff\x98\x41\xc9\xad\x38\x04\x00\x00") + +func templateConfigTmplBytes() ([]byte, error) { + return bindataRead( + _templateConfigTmpl, + "template/config.tmpl", + ) +} + +func templateConfigTmpl() (*asset, error) { + bytes, err := templateConfigTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/config.tmpl", size: 1080, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectGremlinCreateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\x4b\x6f\xe3\x36\x10\x3e\x4b\xbf\x62\x6a\x78\x17\x54\xa0\x32\xe9\x35\x85\x0f\xdb\xd4\x8b\x35\xd0\x66\x1f\x49\x73\x59\x04\x01\x43\x8e\xb4\x44\x64\x4a\x4b\x52\xde\x04\x82\xfe\x7b\x31\x94\x6c\xc9\xb2\x5d\xf4\xb5\x27\x8b\xe4\x3c\xbe\xf9\xe6\x1b\xd2\x4d\x03\x0a\x33\x6d\x10\x66\x4a\x8b\x02\xa5\x3f\xcf\x2d\xae\x0b\x6d\xce\xa5\x45\xe1\x71\x06\x6d\x1b\x37\x0d\xcc\x1f\x6b\x5d\x28\xb4\x70\xb9\x80\x4a\x38\x29\x0a\x98\xf3\x1b\x59\x56\xc8\x7f\xe9\x4f\x7a\x43\x8b\x12\xf5\xa6\xb3\xdc\x7d\xef\xdc\xdb\x36\x8e\xb3\xda\x48\x60\x7b\xb6\x6d\x0b\x67\xe3\x2c\x6d\x9b\x40\x0f\xe4\x46\x6c\x90\x49\xff\x0c\xb2\x34\x1e\x9f\x3d\xbf\xea\x7e\x13\x60\xc1\x85\x5f\x8b\x35\x42\xdb\xa6\x80\xd6\x96\x36\x81\x26\x06\x00\xb0\xe8\x08\xc1\xeb\x3e\x0a\xff\x84\xae\x2a\x8d\xc3\xa6\x0d\xc7\x5f\x6b\xb4\x2f\x29\x3c\x6a\xa3\xb4\xc9\x83\xe9\x04\x10\xef\x3d\x59\xc2\x3f\x92\x31\x4b\xe2\x48\x67\x94\xe4\x98\xb1\xb2\xf4\xc5\x97\xcf\x28\x09\x6c\x3a\x4d\x90\x12\xa0\xe4\xe7\xe0\xfe\xc3\x02\x8c\x2e\xa0\x89\xa3\xc8\xa2\xaf\xad\xa1\x65\x80\x1f\x47\xed\x36\x49\x0a\xe5\x13\x25\xd2\xee\xaa\x34\xce\x0b\xe3\x97\x54\x1e\xeb\xc2\x94\x4f\x27\xdd\x03\x27\x9f\x46\xc4\x12\x0b\x63\xa2\x1a\x59\x9a\x4c\xe7\x97\x07\x35\x74\xfb\xed\xb4\xcc\x71\x30\xfe\xd6\x96\xeb\x2d\x95\xec\x6f\x97\xd4\xef\x4d\xa3\xa5\x64\x15\xff\x63\x45\xb0\x04\xce\x94\x2b\xf8\xad\x15\x1b\xb4\x4e\x84\xbc\x4d\xf3\x23\x7c\xd3\xfe\x0b\xf0\xeb\x7a\x1d\x28\xb3\x42\x1b\x4f\x82\x8b\x22\xff\x52\x21\xe9\x67\xbb\xe9\xbc\xad\xa5\x0f\x70\xa3\xca\xa2\x9a\xc6\x3b\x3f\x1f\x5b\x93\x85\x96\xc2\x23\x27\x7b\x8f\xce\x1f\xb1\x0f\xdb\x6b\xe1\xe5\x17\x74\x20\x8c\x02\xed\x5d\x17\x44\x18\x4f\x8e\x84\x63\x08\x1a\x14\xb7\x16\x4f\xc8\x3e\xdf\x9f\x0d\xdb\x29\x5c\xa4\x44\x13\xa7\x7a\xbb\xa2\xd0\xa8\x50\xc4\x86\x3c\x72\xfe\x46\xa9\xbb\xc0\x14\xff\x20\xe4\x93\xc8\xa9\xa3\xfc\x37\xf1\x88\x45\x6f\x6f\x85\xc9\x11\xe6\x0f\x29\xcc\x33\x72\x99\xf3\xb7\x1a\x0b\xe5\x3a\x26\x74\x76\xd0\x76\x72\x9a\x67\xfc\x26\x70\x12\x6c\x89\xfc\x51\x47\x43\x58\x9d\x91\xd1\x1f
\x46\x7f\xad\xb1\x0b\xb5\x5f\xce\x02\x44\x55\xa1\x51\x6c\xb4\x99\xc2\xeb\x61\x15\x22\x75\x74\x5f\x42\xce\xef\x58\xc2\xdf\x09\x77\xbc\x94\x14\xa6\xdb\xb4\xce\xf8\x76\x14\x82\x76\xce\x0e\x0b\x39\xac\x23\xe1\x57\x65\x6d\x3c\x4b\xd2\x2e\x3d\xb5\xe9\x12\x1e\x1e\xf8\xca\xb1\x8a\x5f\x2f\x3f\xb2\x8b\x24\xd9\xc5\x65\xd7\xf8\x6d\x69\x6d\x57\x65\x08\xf1\xdd\xf1\xf5\xc0\xa8\xdb\xd1\x5e\xbf\xa3\x68\xc3\x3f\xd8\xb2\x42\xeb\x5f\x18\xc9\xed\x46\x9b\xbc\xc0\xff\x31\x75\x27\xca\x71\xce\x89\x7e\xb0\xd3\xcf\x52\xe5\xd8\xcb\x27\x68\x45\x69\x8b\xd2\xeb\xd2\xd0\xf1\x6c\x65\x66\xa3\x33\x43\x77\x0c\xbd\x11\x56\x1b\x9f\xc1\xec\x95\xe3\xaf\xdc\x6c\x04\x78\x8e\x63\xa8\x71\x14\x65\xa5\x05\xad\xc2\x6b\x11\x32\x1f\x83\x8e\x53\x75\xee\xc9\x12\xf9\xca\xad\x0c\x8d\xe2\x4e\x99\x13\x9c\x0b\x98\xbd\xaf\xfd\x6c\xef\x34\x20\x3d\x04\x8a\xfc\xf6\xa5\xc2\xd3\x70\xa9\x2d\x6f\x94\x5a\x06\x61\x98\xee\x46\x4d\xc2\xb5\xc8\x48\xd4\x5a\x25\x09\x5f\x99\x3b\x36\xf4\xb3\x18\x60\x1d\x73\xbd\x2d\x07\xc7\xf7\xb5\x1f\x7b\xee\x94\x30\x54\xfa\x4e\xb8\xe9\xe5\xf6\xdf\xe6\x70\xd9\xcd\x61\x90\xf5\x3e\x30\x5a\x0c\x14\xb6\x6d\x3f\xb1\xab\x5f\x09\xeb\xbf\x1f\x2b\x52\xd3\x5f\x4d\x95\xd9\x3e\xe7\xc4\xc8\xc9\xe1\x38\x22\xdc\x93\xb7\xbf\xce\xa0\x40\x33\x26\x24\x81\xc5\x02\x2e\x3a\x15\xf5\x6f\xd3\x86\xdf\x89\xa2\xc6\xdf\x45\xc5\xbc\xad\xb1\x1f\x8e\xc8\x87\x67\x70\xe4\xfa\xf9\xe2\x9e\x13\x77\xfc\xaa\x14\x05\x3a\x89\x6c\x72\x48\x54\xa4\x07\xe1\x92\x5e\xe9\x0f\x29\x48\x3b\x88\x7d\xec\xfb\xd3\xe5\x7d\x87\xc8\x5b\x58\x80\xb4\xd3\x34\xb6\x0f\xed\xed\x16\x5c\x0f\xdd\xdb\x78\xa2\xb4\x93\x35\x8d\x38\x0b\xff\xd6\xfa\xef\x3f\x03\x00\x00\xff\xff\xf5\x30\xf6\x67\x07\x0a\x00\x00") + +func templateDialectGremlinCreateTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectGremlinCreateTmpl, + "template/dialect/gremlin/create.tmpl", + ) +} + +func templateDialectGremlinCreateTmpl() (*asset, error) { + bytes, err := templateDialectGremlinCreateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"template/dialect/gremlin/create.tmpl", size: 2567, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectGremlinDeleteTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x90\x4f\x6b\xe3\x30\x10\xc5\xcf\xd6\xa7\x18\x42\x58\xac\x60\x94\xfb\xc2\x5e\x76\xb7\xb4\x87\x1e\xfa\x8f\x5e\x8b\x22\x4d\x8c\xa8\x2a\xab\x23\x39\xa4\x88\xf9\xee\x45\x8e\x1b\xd2\xa6\xf4\xa4\x41\x7a\xf3\xde\xef\xa9\x14\xb0\xb8\x75\x01\x61\x61\x9d\xf6\x68\xf2\xba\x27\x7c\xf1\x2e\xac\x2d\x7a\xcc\xb8\x00\x66\x51\x0a\x2c\x37\xa3\xf3\x16\x09\x7e\xff\x81\xa8\x93\xd1\x1e\x96\xea\xde\x0c\x11\xd5\xdf\xf9\x65\x16\x12\x1a\x74\xbb\x83\xf2\x38\x1f\xd7\x99\x85\xd8\x8e\xc1\x40\x7b\xaa\x65\x86\xd5\x69\x08\xb3\x84\x99\xe3\x62\x8f\xa6\x35\x79\x0f\x66\x08\x19\xf7\x59\xfd\x3b\x9c\x12\x90\x68\x20\x28\xa2\x21\x4c\x35\xec\xd7\xbc\xa1\xee\x30\xc5\x21\x24\x2c\x2c\x9a\xd7\x11\xe9\xad\x83\x8d\x0b\xd6\x85\x7e\xd2\x7d\x82\x64\x56\xf3\x5a\x2b\xd5\x6d\x15\xb7\xb2\x3a\xe6\x91\xc2\x99\xd2\x52\x9d\xd4\x07\x52\x07\x5f\xdc\x3b\x20\x4c\x52\xb0\xf8\xae\x23\xfc\x50\xb2\x95\xb0\xb2\xc9\xab\x07\xd2\x3b\xa4\xa4\x7d\xad\x95\x2b\x6c\xaf\x1e\x5b\xa9\xae\x74\xba\xd6\x1b\xf4\x93\xa1\xba\xd1\xe6\x59\xf7\x58\x89\xa6\x5b\x29\x9a\xed\x40\xf0\xd4\x41\x9c\x3e\x5d\x87\x1e\xcf\xd8\x23\xa1\x75\x46\x67\x4c\xd5\xbb\x89\xea\x72\x8e\xce\x52\x34\x7c\xac\x9c\xd5\x7f\x1a\x62\x5b\x4b\x94\x02\x18\x2c\x30\xbf\x07\x00\x00\xff\xff\x55\xc0\xf5\x1a\x25\x02\x00\x00") + +func templateDialectGremlinDeleteTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectGremlinDeleteTmpl, + "template/dialect/gremlin/delete.tmpl", + ) +} + +func templateDialectGremlinDeleteTmpl() (*asset, error) { + bytes, err := templateDialectGremlinDeleteTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/gremlin/delete.tmpl", size: 549, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + 
return a, nil +} + +var _templateDialectGremlinGroupTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x54\xcd\x6e\xdc\x2c\x14\x5d\xc3\x53\xdc\xcf\x8a\x3e\x41\x6a\x11\x75\xdb\x6a\x16\x4d\x7f\xb2\x4a\xa5\x66\xda\x6e\xaa\x6a\x44\xe0\x32\xb5\xe4\xc1\x2e\x60\x6b\x22\x8b\x77\xaf\xc0\xcc\xc8\x99\x99\x54\xea\x0a\xb8\x3f\xe7\x9c\xcb\xc1\x9e\x26\xd0\x68\x1a\x8b\x50\xe9\x46\xb6\xa8\xc2\xcd\xd6\xe1\xae\x6d\xec\xcd\xd6\x75\x43\x5f\x41\x8c\x74\x9a\xe0\xea\x71\x68\x5a\x8d\x0e\xde\xac\xa0\x97\x5e\xc9\x16\xae\xc4\x5a\x75\x3d\x8a\xdb\x92\x29\x85\x0e\x15\x36\xe3\x5c\x79\xdc\x1f\xdb\x63\xa4\xd4\x0c\x56\x01\x7b\x56\x1b\x23\x5c\x2f\x59\x62\xe4\x50\x74\xac\x95\xb4\x4c\x85\x3d\xa8\xce\x06\xdc\x07\xf1\x7e\x5e\x6b\x18\xa1\xb1\x01\x9d\x91\x0a\xa7\xc8\x01\x9d\xeb\x1c\x4c\x94\x38\xf4\x89\xfc\xff\x02\x20\x1e\xd0\xf7\x9d\xf5\x38\x45\x4a\x7e\x0f\xe8\x9e\x6a\x78\x6c\xac\x6e\xec\x36\xd7\x9d\x08\x11\xa5\xed\x4b\xaa\x64\x5c\x94\x95\x92\xc6\x24\x8a\x4b\x1d\xda\xa5\x9d\xf8\xb8\x47\x95\x94\xd6\x70\xc2\x52\x83\x43\xcf\xdf\xe6\xf6\xff\x56\x60\x9b\x36\xc9\x24\x0e\xc3\xe0\x6c\x8a\x52\x12\x33\x7e\x8b\xf6\xf4\x5e\x84\x69\xb0\xd5\x9e\xbf\xba\x98\xb3\x9e\xc3\x6a\x05\xaf\x97\x78\x0e\xbd\x78\x40\xa9\xbf\xcb\x96\x8d\x3c\x43\x8f\xbb\xfa\xa0\x7d\x91\x1d\xf0\x5e\xf6\x8b\xc9\x5e\x96\x56\x8e\xe3\x4e\x7c\x40\xd5\x69\x4c\xb8\x91\xfe\xab\x93\xe5\x26\xe1\x5a\xfb\x56\x7c\x75\x72\x44\xe7\x65\xe6\x1b\xa5\x03\x46\x09\x09\xce\xc3\x8f\x9f\x0b\x57\x29\x21\x56\xee\xf0\x2c\xca\x29\x31\x9d\x83\x4d\x0d\xc6\xe6\xa9\xa4\xdd\xe2\x99\x2f\xc6\xfa\x3c\x4d\x82\xa8\x21\xe4\xf9\x8d\x15\x77\xb3\x1c\x56\xf5\x55\x0d\x55\xc5\x0b\xf1\x0a\x64\xdf\xa3\xd5\x2c\x38\x9f\xaa\xf9\x91\xfc\x98\xc9\xc7\x1a\xd2\x32\x5f\xec\x41\xc4\x5f\x34\x64\xfb\x8e\x32\xce\xc1\xcc\x65\xfe\xcd\x46\xbc\xf3\x49\x22\x17\xdf\xac\xe9\x5a\xcd\xb8\xc8\x9e\x79\x66\x78\x4a\x19\xce\x97\xde\xbc\xf0\x8a\xc5\x5d\xfa\x8a\x19\x17\x94\x10\x42\x6e\x9f\xd8\x66\x73\x80\xb9\xac\x54\x08\xc1\xc5\xa7\xcc\xf7\xac\x69\x0e\x89\x7b\x19\xd4\xaf\xa4\x30\xd7\xad\x31\xfd\x31\xe6\x49\x52\xa0\x74\
x94\x70\xb2\x79\xe6\x2a\xf1\xcf\xb8\x0f\x2c\xbd\x9c\x69\x02\xb4\x1a\x62\xfc\x13\x00\x00\xff\xff\x26\x8f\xb5\xb6\x7f\x04\x00\x00") + +func templateDialectGremlinGroupTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectGremlinGroupTmpl, + "template/dialect/gremlin/group.tmpl", + ) +} + +func templateDialectGremlinGroupTmpl() (*asset, error) { + bytes, err := templateDialectGremlinGroupTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/gremlin/group.tmpl", size: 1151, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectGremlinQueryTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\xdf\x6b\xdb\x30\x10\x7e\xb6\xff\x8a\x5b\x29\x43\x2e\x46\xed\xf3\x4a\x06\xeb\x8f\x6d\x85\xae\xdb\xba\xd2\x97\x31\x86\x62\x9d\x83\xa8\x22\xb9\x92\x6d\x52\x82\xfe\xf7\x21\x59\x4e\x5d\x13\xb3\xb6\xd0\x3c\xd9\x39\xdd\x77\xdf\x77\x9f\xcf\xe7\xac\xd7\xc0\xb1\x14\x0a\x61\x8f\x0b\x26\xb1\xa8\x0f\x17\x06\x97\x52\xa8\xc3\xfb\x06\xcd\xc3\x1e\x38\x97\xae\xd7\xb0\x3f\x6f\x84\xe4\x68\xe0\xc3\x0c\x2a\x66\x0b\x26\x61\x9f\xfe\x2a\x74\x85\xf4\x24\x9e\xc4\x44\x83\x05\x8a\xb6\xcb\xdc\xdc\x6f\xe0\xce\xa5\x69\xd9\xa8\x02\xc8\x93\x5c\xe7\xe0\x60\xc8\xe2\x5c\x06\x51\xc7\xc5\x99\x25\x45\xbd\x82\x42\xab\x1a\x57\x35\x3d\xed\xae\x19\x90\xdf\x7f\x3c\x84\x5e\x9c\xd1\x9b\x87\x0a\xc1\xb9\x1c\xd0\x18\x6d\x32\x58\xa7\x89\x41\xeb\x15\xbc\x8f\x55\xe8\x35\xda\x4a\x2b\x8b\x6b\x97\x26\xa1\xb3\x1c\xe6\x42\x71\xa1\x16\x21\x6f\xa4\x86\x46\xd8\x4f\x9f\x49\x32\x1a\xaf\x69\x22\x4a\xcf\xb1\x0d\xc1\x8d\xbf\xa3\xe7\x2b\x2c\xbc\xde\x1c\x46\x2c\x39\x18\xb4\xd9\x71\x80\xbf\x9b\x81\x12\xd2\xcb\x4c\x0c\xd6\x8d\x51\xfe\x67\x50\x9f\x26\x2e\x4d\x5a\x34\xb5\x28\xd0\xe6\x3d\x97\x41\x4b\xaf\x91\xf1\xdb\x78\x30\x50\xf2\x9f\x52\x82\x87\xf6\x96\xec\x0e\xb7\xf9\x75\x94\x83\x44\x45\x7a\xc2\x2c\x4b\x93\x52\x1b\xf8\x9b\x83\x0f\xe1\x2a\x90\x33\xb5\x40\xe8\x53\x02\x93\xaf\x3a\x03\x56\x55\xa
8\x38\x11\xdc\xf6\xe9\xbe\x36\x19\x91\xf8\x9a\x2e\xed\xc5\x85\x64\x25\x64\xfa\xe2\x39\xf8\x24\xe5\xe4\x1c\x04\x0c\xbd\x62\xcb\xb7\x9d\x82\x5b\x26\x1b\xfc\xc6\x2a\x52\x9b\x06\x77\x3e\x14\xcc\xf8\xf2\x95\x6c\x4c\x78\xf9\xae\x07\x9e\x0d\xe3\xc1\x05\xff\x9e\x3d\x95\xb5\x0d\x47\x3f\x1b\xbd\xec\x2d\x21\xcf\x56\x32\x55\xad\xd0\xaa\x14\x8b\xf1\x03\x8d\xe1\x6c\x33\x02\x13\xf0\x57\x8e\xc5\xa9\x6e\x54\x3d\x31\x18\x42\xd5\x6f\x37\x0c\x1d\xf1\x0e\xa6\xe0\xe8\xd1\xf9\x18\xe9\xd7\xc1\x85\x17\xf0\x72\xcb\xce\x57\xc2\x4e\x59\x36\xd7\x5a\xbe\x9d\x67\x5f\x99\xbd\xc2\xd5\x4e\x5c\x2b\x99\xb4\x38\xe9\xdc\x89\xd6\xf2\x35\xd6\x45\xd9\x70\xc0\xad\xa4\x37\x86\xb5\x68\x2c\x0b\xbc\xad\x6f\x61\x41\x6f\xbb\x2e\x2f\xd9\x1c\x65\xb7\x09\x7f\xb0\xe2\x8e\x2d\xfc\x62\xa2\x21\xda\xf5\x3c\x61\xd4\xb0\x91\x16\x26\xfd\xa4\xa7\x52\x2b\x24\xdd\x66\x8d\x0b\xbb\x7a\xdc\xd5\x63\x54\x65\x90\x8b\x82\xd5\x71\x7b\x57\xf4\x4b\x57\x86\xb4\x5d\x05\x51\x86\xaf\xc0\x18\xa6\x0d\x47\x93\xc1\x47\x38\xea\xf4\xd0\xef\x3e\xe0\x59\x9f\xc1\x19\xc0\x01\x37\xe2\xf3\x84\x91\x53\x2c\x45\xbd\xed\xc9\x87\x83\xe3\x78\x3e\x74\x84\x5e\xfa\x10\x39\x08\x27\x1b\xed\x8d\x12\xf7\x0d\x6e\x2b\xd4\x9d\x1c\x87\xee\xba\xfb\x0c\x66\xb3\x4d\x3f\x67\xc8\x9b\x8a\x3c\xf9\x3e\xb5\x69\xf8\x17\x83\x8a\x83\x73\xff\x02\x00\x00\xff\xff\xc0\x27\xa1\x73\x1d\x09\x00\x00") + +func templateDialectGremlinQueryTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectGremlinQueryTmpl, + "template/dialect/gremlin/query.tmpl", + ) +} + +func templateDialectGremlinQueryTmpl() (*asset, error) { + bytes, err := templateDialectGremlinQueryTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/gremlin/query.tmpl", size: 2333, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectGremlinUpdateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x58\xdf\x6f\xdb\xb8\x0f\x7f\x76\xfe\x0a\x7e\x83\x6c\xb0\x0b\x7f\xd5\xee\xb5\x87\x1e\x70\xeb\xb2\x2d\x87\xbb\x76\x5b\x7b\xbb\x87\xa2\x28\x34\x9b\x4e\x84\x3a\xb2\x27\xc9\x59\x87\xc0\xff\xfb\x81\x92\xfc\x33\x69\xb7\xee\xb6\xdb\xcb\x16\x4b\x14\xf9\x21\xf9\x21\x45\x75\xbb\x85\x14\x33\x21\x11\xa6\xa9\xe0\x39\x26\xe6\x70\xa9\x70\x9d\x0b\x79\x58\x95\x29\x37\x38\x85\xba\x9e\x6c\xb7\x30\xfb\x50\x89\x3c\x45\x05\xc7\x27\x50\x72\x9d\xf0\x1c\x66\xec\x22\x29\x4a\x64\xcf\xfd\x8e\x17\x54\x98\xa0\xd8\x38\xc9\xf6\x77\x7b\xdc\x0b\x15\x12\x69\x7f\xc5\xf5\x45\x95\x65\xe2\xae\x13\x98\x9e\x4b\x67\x74\x92\x55\x32\x81\x70\xa0\xb2\xae\xe1\xa0\x0f\xa6\xae\x23\xf0\x78\x2f\xf8\x06\xc3\xc4\xdc\x41\x52\x48\x83\x77\x86\x9d\xba\xff\x23\x52\xf1\x7f\x10\x19\xc8\xc2\x38\xc3\x75\x7d\x75\xbd\xdd\x02\xca\x14\xea\xda\xea\x63\x67\x7c\x4d\xeb\x31\xa0\x52\x85\x8a\x60\x3b\x09\x14\x6a\x82\xf8\xd4\xeb\x67\xef\x50\x97\x85\xd4\xb8\xad\x27\xc1\xc7\x0a\xd5\xe7\x18\x3e\x08\x99\x0a\xb9\xb4\x72\x23\x9c\xcc\x1f\x6b\x8c\x7b\xc3\x63\x29\x91\xb6\x40\x22\xf6\x96\xb4\x86\xd1\x24\x10\x19\xe1\xd8\xa7\x35\x55\xf4\x8b\xcd\xef\x30\x21\x67\x63\x18\x21\x89\x41\xa1\x8e\x7e\xb1\xc7\xff\x77\x02\x52\xe4\xe4\x4a\xa0\xd0\x54\x4a\xd2\xa7\xf5\x70\x12\xd4\x8d\x91\x18\x8a\x5b\x32\x24\xf4\x69\x21\xb5\xe1\xd2\xcc\x29\x02\xa1\x53\x53\xdc\xde\x7b\x9c\xfc\x9a\x39\x3a\xe4\x95\xb2\x74\x78\xd7\x01\x75\xfb\x9d\xdf\x93\xa0\x39\x70\x32\x16\x0c\xac\x8f\x94\x5a\x8a\x76\x3f\x1b\xdb\xa4\x90\x99\x58\x1e\xef\x44\xc1\xad\x7b\x23\x98\x6b\x6f\x61\xc3\x15\xb4\xca\xb6\xdb\x0e\x98\xd7\xd7\xb3\xe5\x55\x8c\xd9\xe5\x97\x23\xaf\xd9\xe6\x65\x27\x1d\x24\xf7\x52\x15\xeb\x86\x10\xe1\x57\x87\xdc\xaf\x35\x5a\x62\xda\x9d\x3c\x9a\xe9\x24\xd8\x45\x56\xa4\x56\x1f\x5b\xbc\x60\x97\x9f\x4b\xcf\x31\xcf\x28\x38\x48\x75\xce\x2e\x15\xdf\xa0\xd2\xdc\xe2\x22\xbf\x3e\x09\xb3\x02\x76\x56\xad\x6d\xca\x15\x17\xd2\xb8\xe8\x18\x52\x90\x74\x8b\xda\xa8\x2a\x31\xd6\x9d\xa0\x54\x98\x8e\xf5\x1d\x1e\xf6\xa5\x49\x42\x24\xdc\x20\x23\x79\x83\xda\xec
\x91\xb7\xcb\x6b\x6e\x92\x15\x6a\xe0\x32\x05\x61\xb4\x53\xc2\xa5\xa1\x83\x84\xa3\x53\x6a\x4b\x6b\xcd\x6f\x31\xbc\xba\x3e\xe8\x96\x63\x38\x8a\xc9\x6d\x46\x5e\x0e\x93\x45\xbf\x0f\x0f\x20\xe1\x1a\xa1\xc8\xc0\x75\x31\xd0\x25\x26\x22\x13\x09\x6c\x50\x19\xbc\x83\x83\xc3\xbd\x1c\xdd\x90\xb9\x25\x7b\x1f\x8a\x34\x6a\x55\x2d\x51\x22\xf1\xc8\xab\xca\x0a\x05\x67\x56\x8f\x48\x50\xf7\x34\xf5\x88\xd8\xa8\x89\xd8\x6b\xae\xff\xe0\x1f\x30\xb7\xd9\x65\x6f\x78\x72\xcb\x97\x24\xc5\xec\x6a\x34\x09\x02\xd2\x77\x13\x43\x69\xfb\x25\x97\x4b\xdc\xa1\x7b\x1b\x58\xed\x53\xc1\x5e\x79\x26\x6c\x22\x17\xb1\x7e\x00\xa8\x0a\x42\x47\x75\x91\x41\xa1\xc6\x99\x0e\x73\x94\x30\x63\xf3\x74\x89\x3a\x82\xba\x56\x1b\x38\x81\x0d\x3b\xcd\x0b\x89\x61\xd4\x92\x87\xe8\xa0\x34\x5c\x5d\x0f\x73\x38\x09\x7c\x60\x1c\xd4\xd9\x4d\x0c\xb3\x8c\xa0\xcf\xd8\x4b\x81\x79\xaa\x9b\x8a\x19\x3b\x41\xdf\x19\xbb\xb0\x8c\xb2\x92\x44\xf1\x5e\xbd\x34\xb9\xc8\xd8\x5f\x52\x7c\xac\x7c\x24\x07\x54\x38\x01\x5e\x96\x28\xd3\xb0\xb7\x18\xc3\xd3\xee\xcb\x46\xc7\x32\xf5\xb8\x0b\xff\xfe\xc8\xc7\x30\x5e\x76\x00\x9b\x2e\x68\xcb\xf3\xe0\x6b\x9c\x88\xd8\x69\x51\x49\x13\x46\xb1\xb5\x4e\x04\x3f\x86\x9b\x1b\xb6\xd0\x61\xc9\xce\xe6\x6f\xc3\xa3\x28\x6a\xd5\x86\x67\xf8\x69\xae\x94\x73\xd1\x6a\xf8\xe1\xf0\x1c\x2e\x2a\x93\x01\x4d\x82\x0d\x7b\xa3\x8a\x12\x95\xf9\x1c\x52\x86\x2f\x84\x5c\xe6\xf8\x1d\xed\x4e\xc6\xc4\x1c\xb1\x06\x1d\x6b\x2c\x0f\x7b\x37\x44\x2a\x14\x26\x46\x14\x92\xb6\xa7\x0b\x39\xed\xed\x49\xea\xe3\x74\xe3\x28\x21\x4d\x06\xd3\x27\x9a\x3d\xd1\xd3\x1e\xde\x19\xf6\x91\xf6\x58\x85\x6c\xa1\x17\x92\x28\xdc\x10\x6b\x64\xec\x04\xa6\xe7\x95\x99\xf6\x37\xad\xb5\x5d\x63\x68\xfb\xec\xc3\x26\xdb\x20\xfb\x16\xa2\x70\x5d\x6c\x10\xd0\xfa\xea\x3a\x46\x0f\x5a\x9f\xf0\x7b\x2a\x27\xc9\x91\x2b\xa4\x41\xa1\x19\xbc\xb0\xb9\xd1\xda\xca\xe9\x7a\x0f\xb5\x13\x91\xde\xdf\x4c\x1c\x94\x2f\x68\xeb\xc3\x77\x18\x2f\x30\xcf\xde\x61\xe6\xe3\x63\xdc\x74\xd7\x76\x0d\xf6\xbc\x30\xab\xb9\xa5\xb2\x74\xba\x22\xd7\x7e\xec\xcc\xd5\xf3\x90\xfd\xbd\x42\x85\xe1\xcd\x0d\x3b\x57\xf4\xef\x42\xfa
\x2a\x5d\xbc\xa0\x9e\x1b\x53\xe1\x9c\x57\x66\xb0\x18\x75\x4d\x89\xbd\x50\x45\x19\x46\x6c\x61\x50\x71\x83\x61\xd4\x77\x7f\x7f\x9e\x77\xa0\x2e\xe4\x23\x81\x9a\x15\xaa\x21\xa0\xaf\xc3\x73\x8f\xfd\xf3\xca\xfc\x07\x00\x9a\x0c\xda\x26\xde\x36\x4e\xa3\x74\x0c\x46\xf9\x5b\xa3\x61\xa7\xbf\xd8\x06\xec\xfc\x12\x8d\xe8\x1b\xc7\xed\x7c\xfb\x60\xc5\x6d\xd8\x6f\x69\x3a\x74\xdd\xce\x51\xa1\xbf\x70\x23\xc7\x86\xdd\x10\xee\x3b\x78\x59\x74\xc7\x1c\x61\xee\xe7\xee\x6b\xae\xc7\x93\xce\xbd\xcc\xfe\x96\x0b\xc7\xdf\x38\xa3\x72\x18\xe2\x1d\xdc\x11\x8f\xb9\x24\xa8\x3f\x3e\x74\x47\xc8\xe6\xe5\x42\xa1\x70\xea\x6d\xaf\xff\x37\x9e\x2c\xd9\x7c\x3c\xba\xb4\x8e\x7c\x53\x01\xff\x04\xf7\x47\x1c\xfa\x41\xd1\xa0\x8f\xee\x12\xa9\xeb\x81\xdf\x3f\xcb\xeb\x7e\x05\xb4\x1f\xe3\x29\x91\xbd\xe7\x79\x85\x7f\xf2\x32\x34\xaa\xc2\xe8\x0b\x2f\x03\x91\x41\x8e\xb2\x1f\xaf\x08\x7e\x85\x23\x37\x8f\xfa\x36\x42\x63\x3a\xe8\x4a\x21\x98\x15\x82\x69\xa7\xfe\xb4\x40\x6d\xdb\x1b\xbd\xcc\xb9\x90\xb0\x2e\xac\x0c\x97\x40\x83\xb7\x9f\xc8\x45\x06\x9f\x10\x56\x7c\x33\x78\x81\xf8\x6e\x14\xec\x3e\xe1\xbf\x43\xb1\x3e\x90\x9d\x57\x97\xe1\xb3\x7e\x72\x9e\xce\x95\xea\x62\xf2\x92\x8b\x1c\xd3\xed\x5a\x2f\x8f\x61\xea\xbb\x67\xe7\xaf\x77\x53\xef\xf5\x73\x5a\xdf\x9f\xaf\x80\x26\xf1\x1e\xfa\xab\xa3\x6b\x3b\xfb\xb3\xd3\x82\xe7\xa8\x13\x0c\x47\x9b\x84\x39\x06\xfb\x08\x68\x9e\x11\x89\xea\x7a\x76\x5f\xfa\xd9\xf1\xb5\x4b\x96\x33\xa2\xc6\x8a\xd5\x40\xd9\x1e\xc2\xec\xde\x23\x24\xea\x5f\xb4\x34\x40\xfe\x5e\x08\x49\x1b\x8c\xb1\x68\x62\xff\xca\xe3\x8f\xfe\x13\x00\x00\xff\xff\x08\x74\x64\xd2\x64\x12\x00\x00") + +func templateDialectGremlinUpdateTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectGremlinUpdateTmpl, + "template/dialect/gremlin/update.tmpl", + ) +} + +func templateDialectGremlinUpdateTmpl() (*asset, error) { + bytes, err := templateDialectGremlinUpdateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/gremlin/update.tmpl", size: 4708, mode: os.FileMode(420), modTime: 
time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectSqlCreateTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x5f\x6f\xdb\xb6\x17\x7d\x96\x3e\xc5\xfd\x19\x6e\x21\x05\x0e\x93\xf6\x31\xbf\x65\x40\xd7\xa4\x80\xb1\xa5\x49\xe3\x74\x7b\x68\x8b\x81\x96\x2e\x1d\x22\x32\xe5\x90\xb4\xe3\xc0\xd0\x77\x1f\x2e\x65\x59\xb2\x2c\x3b\x8e\x1b\xd4\xdb\x30\xbf\x24\xb2\xef\x3f\x9e\x7b\x78\x0f\xc5\xd9\x0c\x62\x14\x52\x21\xb4\x62\xc9\x13\x8c\xec\x91\xb9\x4f\x8e\x22\x8d\xdc\x62\x0b\xb2\xcc\x9f\xcd\xa0\xdd\x1f\xcb\x24\x46\x0d\x27\xa7\x30\xe2\x26\xe2\x09\xb4\x59\x2f\x4a\x47\xc8\x7e\x99\xff\x32\x37\xd4\x18\xa1\x9c\xe4\x96\x8b\xff\x17\xee\x59\xe6\xfb\x62\xac\x22\x08\x96\x6c\xb3\x0c\x0e\xaa\x59\xb2\x2c\x04\x73\x9f\xf4\xf8\x04\x83\xc8\x4e\x21\x4a\x95\xc5\xa9\x65\xef\xf3\xbf\x21\x04\xce\x9c\x7d\xe4\x43\x84\x2c\xeb\x00\x6a\x9d\xea\x10\x66\xbe\x37\xe1\x1a\x02\xdf\xf3\x34\x1a\x0a\xc1\xae\xd1\x8c\x13\xeb\x7b\x9e\x73\xb8\xae\x64\x3c\x85\xd7\xd5\x20\xb3\x28\x55\x42\x0e\x4e\xa0\x56\x19\xcb\xbf\xcf\x7c\x2f\xf4\x3d\x3b\x75\xb9\x68\x71\x75\xb3\x58\xd3\x7f\xec\x66\x4a\x15\x87\xbe\x27\x85\xb3\xfc\xdf\x29\x28\x99\x50\x65\x9e\x46\x3b\xd6\x8a\x1e\x5d\x10\xdf\xcb\x7c\xaf\x82\x2b\x55\xdb\x55\x06\xb5\x75\xe0\xb0\x2b\x1e\xdd\xf1\x01\x95\xc6\x6e\x78\x3f\xc1\x90\x9d\xa1\xe0\xe3\xc4\xd6\xb1\x2b\x52\x9f\xe5\xed\x0b\xc2\xd0\xf7\x66\xb3\x43\xd0\x5c\x0d\x10\xda\x7f\x76\xa0\x2d\x28\x41\x9b\x7d\x90\x98\xc4\x86\xba\xe0\x51\x7d\xf5\x38\xe4\xd4\x16\xac\x67\xf5\x38\xb2\xce\x96\x70\xaa\xac\xa0\x28\x97\xf5\x70\xb5\x48\x7a\x16\xd4\x22\x63\xb9\xb2\xae\x2d\x07\xab\x19\x56\x13\x84\x14\x78\x36\x03\x29\xe8\xb7\x8f\xe3\x24\xa1\xd5\x42\x96\x91\x37\x2a\x32\xa9\xf7\x8e\xe2\x14\x34\x14\x45\x03\xe1\x74\xbb\x7c\xbe\x47\xc0\xd3\x5a\xf3\xe0\xbe\x77\x3f\x46\xfd\xd8\x01\xae\x07\x86\x70\x2a\x16\xf9\x89\xbe\x0e\xca\x56\x9e\x9c\x82\x9d\xb2\xf3\x29\x46\xd4\xe2\x0e\x54\xdc\x3a\xf0\x5a\xa3\x09\xff\xbf\xa9\xe5\x3a\x4d\x92\x3e\x8f\xee\x82\x39\x89\x42\x47\x00\x19\x2f\x18\xa5\xd1\xb0\xdf
\xb8\xb1\x39\x09\xba\x71\xf0\x24\x8b\x9a\x43\xae\xa0\xd5\x3d\x03\xc7\x57\x42\x98\x75\xcf\x58\xd7\xf4\xac\x96\x6a\x00\x59\x66\xac\x8e\x52\x35\x61\x1f\x52\x3d\xe4\xb6\xab\x6c\x40\x05\xbd\x39\x0e\x09\xfb\xc4\x10\xae\x32\x5e\xf4\x61\x85\x57\x98\xf3\xea\x3c\x1e\x60\x49\xab\x04\xd5\x0a\x45\xe9\x19\xeb\x8d\x87\x9f\xe1\x38\xe7\x15\x85\x95\x02\xb8\x8a\xc9\xec\xb3\x92\xf7\x63\x74\x0e\x98\x88\x6b\x14\x8e\x02\x47\x07\x70\xf9\xf6\x12\x1e\xa4\xbd\x05\x83\x89\x00\x8d\x02\x35\xaa\x08\xe1\xe0\xc8\xe5\xf6\x3c\x91\x6a\x40\x19\x3b\x34\x5d\x95\xdb\xd4\x91\x97\x90\x17\x61\x71\x38\x4a\xb8\x6d\x1c\x86\x47\x84\x14\x6a\x2b\xe3\x16\xad\xfc\x70\x9e\xb3\x4e\x1f\xda\xc7\x9f\x47\x31\xb7\xd8\xb8\x45\x30\xdf\xcd\x95\x7d\x12\xb2\x3c\x8e\xe7\xad\xdb\x56\xc8\xde\xa7\xc9\x78\xa8\x96\x36\x17\xca\xb8\xf4\xfc\xe3\x16\x35\x06\x94\xfa\xfc\x53\x63\x08\xea\xfb\x92\xbb\x8c\xc3\xb0\xa4\x38\x7d\x76\xa7\x39\x7d\x9e\xe0\x25\x7d\x1a\xf0\xfa\x71\x70\x6d\x44\x0b\xd9\xcd\xe3\x08\x1b\x72\xd3\xb7\x75\xe4\x1c\xf0\xef\x54\x1c\x84\xac\x6b\x68\x5e\x6d\x5b\xc4\xbe\x00\xe7\x42\x60\x64\x71\x79\xd2\x5c\xa7\x0f\xe6\xdd\xfc\x87\x5a\x41\x3b\x27\x92\x02\xa4\xb2\x41\x91\x2f\x84\x9f\x9e\x31\x0c\x9e\x4c\xf7\xfa\x5c\x6b\x87\xa6\xe6\x52\xd9\x0f\x5c\x26\x18\xcf\x86\x66\x70\x02\x62\x68\x59\x6f\xa4\xa5\xb2\x22\x68\x7d\x6d\xe5\xf1\xe7\xb2\xf0\xb5\x05\xc1\xab\x49\x08\x3c\xd1\xc8\xe3\x47\x3a\x4b\x28\x57\x1d\xd8\x14\x38\xc4\x52\xb8\x29\x62\x21\xf7\x2b\xdd\x5a\x79\xa7\xb3\xa5\x25\x66\xc5\xb4\x72\xc3\x91\x06\x2a\xb2\x8b\xb7\x17\x00\xfb\x1c\x40\x14\x93\x53\xca\xe3\xb9\x56\xf6\xe9\xe1\x8d\x7b\x38\x9c\x17\xd9\x35\x5d\xf2\x75\x13\x3d\xb7\x2f\x2c\xc8\x7c\xe1\x5a\xcc\xf9\x35\x73\x6d\xcd\xf9\xe4\xa9\x8d\x9a\xef\x04\xb3\xc6\xef\xea\xd7\x8a\xd3\x97\xbc\xb8\x2c\xfb\xd6\x81\x6d\xcd\xfb\x64\x5e\x66\xfb\x9d\x27\x63\x34\x4e\xc8\x96\x66\x64\x09\x46\x4d\x56\x48\x4d\x0e\x35\x0a\xc8\x61\x36\x60\x6f\x11\xd0\x69\x9a\x54\xd0\x4f\xed\x2d\x3c\xf0\x47\xc3\x4a\x9d\xa9\xa4\x41\xca\x53\x4f\x53\x85\xd1\xf3\xf6\xb1\xe9\x9b\x89\x7a\xb9\x57\x9e\xbe\x98\x50\xee\xac\x93\x3b\xca
\xa4\xff\xf7\xea\xe3\xe5\xdb\x8b\xa2\x8f\xa3\x02\xc8\xab\x79\x5d\x7b\x69\xec\x88\x5d\xea\x20\xdc\x59\x4c\xcb\x85\xbe\x18\x45\x76\x3c\x1a\x94\xfc\x20\x7d\x1f\x75\xf2\xa1\xf7\x4c\x91\x2f\x82\x55\xe9\xf2\x5d\x6c\x79\x9a\x2c\x39\x78\xdb\xea\x7c\xa3\xcc\x6f\x9b\xe4\x25\x34\xfe\x7b\x25\x3e\x55\x08\xa9\x80\x55\xa5\x7f\x35\xd9\x49\xe7\xef\xf0\xd1\x6c\xb7\x82\xe2\x38\xb0\xbc\x2d\x2b\xaf\x27\x0b\x89\x28\xd4\x66\x41\xfb\xca\x5b\x57\x8e\x02\x56\xde\xfd\x8a\xf7\xb0\x77\x36\x95\xc1\xf6\xe5\x7c\x39\xfe\xb6\xf5\xd1\xcd\x5d\x39\x94\x7d\xac\x14\xbf\x28\x87\x2a\x79\x56\x72\xbf\x41\xf0\x9a\xcf\x1c\xff\x68\x05\xd8\xf5\xb4\xdf\xa0\x1b\x2b\xb0\xef\x07\x92\x4d\x88\xfc\xb0\x97\xa1\x75\xf0\x94\x5c\xfa\x6f\x6c\xfe\x5b\xc7\xa6\x6a\xbe\x8a\x5b\x6a\xf8\xfb\x74\x38\x94\x36\xd8\x7c\xad\x56\xdc\xa4\xce\xbf\xab\x5f\x7e\x75\xc8\xca\xcf\x7c\xbf\x7c\xb9\xf1\x37\x5e\x79\x57\xcf\x38\xc5\x8d\xd7\x86\x39\xbe\x76\x88\xcf\x0f\x36\x0d\xa4\x59\x1d\xca\xcb\x20\x94\x95\xfe\x15\x00\x00\xff\xff\x2f\x84\x95\x33\x9c\x17\x00\x00") + +func templateDialectSqlCreateTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectSqlCreateTmpl, + "template/dialect/sql/create.tmpl", + ) +} + +func templateDialectSqlCreateTmpl() (*asset, error) { + bytes, err := templateDialectSqlCreateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/sql/create.tmpl", size: 6044, mode: os.FileMode(420), modTime: time.Unix(1560438408, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectSqlDeleteTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x91\x4d\x6b\xf3\x30\x10\x84\xcf\xd2\xaf\x58\x42\x78\xb1\x5e\x8c\x72\x2f\xf4\xd2\xaf\x53\x0f\x4d\xd3\x7b\x51\xa4\x49\x30\x55\x6d\x67\x25\x87\x14\xb3\xff\xbd\xc8\xf9\x20\xa5\xf4\xe4\xf5\x6a\xb4\xf3\xcc\x6a\x1c\x29\x60\xd3\xb4\xa0\x59\x68\x5c\x84\xcf\x8b\xb4\x8b\x8b\x80\x88\x8c\x19\x89\xe8\x71\xa4\xf9\x7a\x68\x62\x00\xd3\xcd\x2d\xf5\x2e\x79\x17\x69\x6e\x57\xbe\xeb\x61\xef\x4e\x27\x27\x21\xc3\xa3\xd9\x1f\x95\x97\xfa\x72\x5d\x44\xeb\xcd\xd0\x7a\xaa\xae\xb5\x22\xf4\xff\xda\x44\xc4\x50\xda\xc5\xc7\x03\x7c\xe5\xf3\x81\x7c\xd7\x66\x1c\xb2\xbd\x3f\x7e\x0d\x81\xb9\x63\x1a\xb5\xda\x3b\x26\x46\x2a\x6a\xfb\x8a\x34\xc4\xac\x55\x42\x09\xd1\x4d\x04\xa5\xbf\x9a\xfe\x2b\x63\x9f\xb8\xfb\xac\x4a\xe7\xcd\xad\x23\x26\x02\xfb\xe2\xfc\x87\xdb\x82\x44\x8e\x5d\x63\xb4\xda\x74\x4c\xef\x35\xf5\x53\x04\xd7\x6e\x41\x3f\x82\x89\xd8\x9e\x11\x1a\xef\x32\x52\x81\x50\xbd\x5d\x2d\x9f\xab\xb3\xaf\xd1\x4a\xb4\xda\x0d\xe0\xaf\x9a\x1c\x6f\xd3\x19\xe4\x61\x5a\xe9\x1f\xbe\x13\xdd\x09\xf5\x32\xc9\x2e\xcb\x94\xca\x68\xc5\xc8\x03\xb7\xbf\x40\x02\x97\xca\x9e\x37\x55\xd3\x95\x6d\x4d\xff\x18\xc9\x68\xd1\xe5\x61\xd0\x06\x12\xf9\x0e\x00\x00\xff\xff\x90\x3c\x86\x4f\xed\x01\x00\x00") + +func templateDialectSqlDeleteTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectSqlDeleteTmpl, + "template/dialect/sql/delete.tmpl", + ) +} + +func templateDialectSqlDeleteTmpl() (*asset, error) { + bytes, err := templateDialectSqlDeleteTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/sql/delete.tmpl", size: 493, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectSqlGroupTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x52\xc1\x6e\x1c\x21\x0c\x3d\x0f\x5f\xe1\x46\x51\x35\xa4\x88\xf4\xdc\x6a\x2f\xc9\xa1\x97\x5e\xd2\x3d\x56\x55\x45\x19\xb3\x42\x25\x66\x06\x98\xcd\x46\x23\xfe\xbd\x82\x61\x93\x34\x9b\x3d\xe4\x84\x65\x3f\xfb\x3d\x3f\xbc\x2c\x30\xa0\xb1\x84\x70\x31\x58\xe5\x50\xa7\xeb\x38\xb9\xeb\x5d\xf0\xf3\x78\x01\x39\xb3\x65\x81\xcb\x3f\xb3\x75\x03\x06\xf8\xb2\x81\x51\x45\xad\x1c\x5c\xca\xad\xf6\x23\xca\x9b\x56\x69\xc0\x80\x1a\xed\x7e\x45\x3e\xc5\x4f\xed\x39\x33\x66\x66\xd2\xd0\xff\x87\xcd\x19\xae\x5e\xb2\xe4\xcc\x21\x4e\x6e\xab\x15\xf5\x3a\x1d\x40\x7b\x4a\x78\x48\xf2\x76\x7d\x05\xec\xc1\x52\xc2\x60\x94\xc6\x25\x73\xc0\x10\x7c\x80\x85\x75\xc1\x3f\xc4\xc2\xfc\x31\x4e\x4e\xfe\xf0\x0f\x71\xc9\xac\x9b\x66\x0c\x8f\x02\x54\xd8\xd5\xda\x2b\x66\x19\x27\x77\x57\x10\x3d\x97\xed\x65\x9d\x35\x65\xe6\x5b\xe8\x21\x94\xa8\x21\x75\x3a\x08\x78\x31\x5e\x40\x11\xc0\xbf\xd6\xe6\x0f\x1b\x20\xeb\x8a\xaa\x2e\x60\x9a\x03\x95\x2c\xeb\x32\xeb\x06\x34\x18\x2a\x54\xde\x3a\x1f\xb1\x30\x36\x48\xd1\x5d\xd6\xde\x3a\xab\xb1\x2f\x10\x01\x7b\xce\x32\x7b\x8f\x6f\x6d\x0d\xb8\xaa\xd3\xb0\x7c\xe9\x6a\x4f\x3c\xc6\x6f\xdb\xc0\x3a\xed\xdd\x7c\x4f\xd5\xa6\x7b\xf5\x17\xfb\x9f\xbf\x62\x0a\x96\x76\x02\x3e\x0b\x70\x48\xaf\xe9\xa5\xb1\xe8\x86\xc8\xe1\xd3\x49\xb5\x14\x29\x72\xfe\x3c\x74\x03\x6a\x1c\x91\x86\xbe\x25\xc4\x89\x86\x75\x9a\x94\x92\xb3\xce\xf8\x00\xbf\x05\x18\xaa\x97\xa4\x68\x87\xa7\x70\x8a\xd5\xde\xf3\x04\x86\xe4\xf6\xee\x7b\x7f\xdc\xbb\xa8\xc9\xcf\x5e\xb7\x6c\xb3\xe8\xd8\x55\xe8\xe5\xb7\x72\xfd\x37\x8f\x67\xf6\xad\x0a\xeb\xbd\x23\x0d\x90\xf3\xbf\x00\x00\x00\xff\xff\x63\x5a\x3f\x5b\x43\x03\x00\x00") + +func templateDialectSqlGroupTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectSqlGroupTmpl, + "template/dialect/sql/group.tmpl", + ) +} + +func templateDialectSqlGroupTmpl() (*asset, error) { + bytes, err := templateDialectSqlGroupTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/sql/group.tmpl", 
size: 835, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectSqlQueryTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x55\xdf\x6f\xdb\x36\x10\x7e\xb6\xfe\x8a\xab\xd1\x15\x52\xa0\xb1\xcd\x6b\x02\x0f\xd8\xe2\x0e\x08\x50\x14\x6b\xdd\xb7\x20\x18\x18\xea\xe4\x11\xa1\x49\x99\xa4\x1c\x07\x82\xfe\xf7\xe1\xa8\x1f\x96\x7f\xc8\x4b\x82\x01\x7d\x08\x22\x93\x1f\xf9\xdd\x7d\xf7\xdd\xb1\xaa\x20\xc3\x5c\x6a\x84\x69\x26\xb9\x42\xe1\x3f\xba\xb5\xfa\xb8\x2e\xd1\x3e\x4f\xa1\xae\xa3\xaa\x82\xf7\xc5\xe3\x12\xae\x66\xf0\x9e\x2d\x84\x29\x90\xfd\xc5\xc5\x23\x5f\x62\xb7\xfb\x50\x4a\x95\xa1\x25\x44\xc1\x9d\xe0\xaa\x07\xfe\xd1\xee\xb4\x40\x8b\x02\xe5\xa6\x41\xf6\xdf\xfd\xf1\xba\x8e\xa2\xbc\xd4\x02\xe2\x3d\x6c\x5d\xc3\xc5\x90\xa5\xae\x13\x70\x6b\xf5\xbb\x52\xb1\xf0\x5b\x10\x46\x7b\xdc\x7a\x76\xd3\xfc\x4f\x20\xbe\xbb\x0f\x78\xf6\x95\xaf\x28\xc4\x14\xd0\x5a\x63\x13\xa8\xa2\x89\x35\x4f\x8e\xc8\x3f\xb8\xb5\x62\xdf\xcd\x93\xab\xea\x68\xe2\x90\xb2\x36\x21\xaa\x03\x66\xe6\xd6\xea\x1b\x29\x11\x27\xd1\x44\xe6\x50\x6a\xb9\x2e\xf1\x14\xb0\xd9\xb9\x06\x85\x3a\x6e\xbe\x13\x98\xcd\xe0\x13\xb1\xf6\x0c\x6c\x2e\x9d\x97\x5a\x78\xba\xae\x8e\x26\x41\xe4\x14\xb8\x5d\x86\xa8\x7a\xd8\x90\x12\xed\xc9\xc0\x32\x4b\x5f\x2d\x52\xf8\x6d\x0a\x83\xcb\x52\xa0\x44\x93\xeb\x70\xf8\xdd\x0c\xb4\x54\x21\x0e\x8b\xbe\xb4\x9a\x7e\x06\x51\x42\x0c\x19\xe6\x68\x03\x9e\xdd\x28\xe3\x90\x68\xab\xea\x57\x62\xf3\xa1\xa2\xaa\xb4\xa1\xa2\xdf\x77\xec\xd1\x64\xc3\x6d\x1b\x92\xa7\x02\x55\xd5\x0e\x17\x64\x0f\xa0\xc3\xe8\x09\xca\xfe\xb4\x66\x45\xca\xc7\x2f\x0f\x71\x70\x5a\x18\x9d\xcb\xe5\xa1\x41\xda\xe5\x24\xea\x8e\xef\x4e\xa4\x74\x55\xf4\x2a\x67\xdd\x98\x52\xfb\x11\x6f\x49\xed\xff\x37\x3f\xed\xcc\x74\x77\xef\xbc\x95\x7a\x59\x05\xfc\xa0\xbd\x58\xf8\x7d\x3b\xa7\x08\x9c\xe7\x3a\x88\xdd\x28\x4b\x46\x3b\x6d\xc2\x04\x7e\x6b\x7d\xd7\x32\x8c\xb9\x35\x88\xdb\x9b\xae\x49\x9b\x12\xe9\x5d\x3a\xd8\x53\xe5\x4a\xbb\xd6\xd9\x8c\xb1\x84\xfe\x92\x9f\xe6\xe0\x4f\xe7\
xfd\x2b\x73\x78\x17\x56\xbe\xe2\xd6\xc7\xc9\xf1\x49\x63\x69\xef\x29\x9e\x76\xc3\xad\xae\xaf\x40\x9b\x70\x0d\xe4\xa6\xd4\xd9\xb4\xe9\x50\xf2\xb9\x06\xa9\xfd\x30\x93\x70\xf5\x42\x70\x1d\x7f\xd0\xe7\x42\xcc\x57\x9e\x7d\x26\xb2\x7c\x9f\x28\xe7\x52\x61\x06\x16\x79\x26\xf5\x12\x04\x09\x7f\x05\xbf\x6c\xa6\x21\xb6\x86\xb8\xeb\x83\x37\xf8\xf7\xf3\x56\xba\x31\xff\x3e\x18\xa3\x86\x06\xd6\xe9\x58\x79\x86\x8d\xb0\xab\xe3\x71\x9e\x39\x57\x0e\xc7\x73\x15\xff\xa0\x78\x04\xa4\x90\x50\x0b\x1c\x4b\x93\x2c\xfb\x86\x54\x6f\xe7\x6e\xf4\x11\xe8\x5a\xe7\xc7\x73\x71\xf8\x0c\x6c\xdc\xb9\xb4\xdb\x97\xe5\x5c\xd2\x7b\xe3\x89\x3c\x22\x33\x07\x47\x94\xd1\x24\x37\x16\xfe\x4e\x61\x13\x5c\xc3\xf5\x12\x61\xe3\xc2\x3d\x84\x9f\x01\x2f\x0a\xd4\x59\x2c\x33\x97\xc2\x86\xdd\xce\xf7\x34\x09\xab\xad\x22\xaf\x90\xa4\xed\x3c\xb8\xa0\x4e\x5e\x74\xa3\xa8\x8a\x26\xfe\x32\x74\xe8\x5a\xb1\x1f\xfc\x41\x61\x7c\x38\x6a\xc2\x6a\xb2\x3f\xbe\x76\x77\xc4\xfe\xb2\x9f\x02\x87\x27\xdb\xf5\x6e\x2c\x84\x11\x1f\xfb\xcb\x46\xc0\x13\x02\x0f\x05\xed\xd9\x4e\x96\x62\xf8\x76\xb6\x71\x1c\xcd\xa4\xff\x88\x26\x48\xda\xd6\xa1\xd8\xd5\xe1\x90\xac\xb0\x98\x49\xc1\x3d\x36\xf5\x29\xd8\xe2\xdb\x97\x9e\xeb\xa5\x97\x18\x4b\x85\x18\x3b\x4f\x43\x5b\xae\xa4\x3f\x65\xbb\xb0\x71\xdd\xee\x9f\x90\x87\x7d\xa1\x9d\xf8\x22\x00\xf6\x6c\xd2\x21\xc8\x26\x55\x05\xa8\x33\xa8\xeb\x7f\x03\x00\x00\xff\xff\x60\xec\xa0\x25\xd5\x09\x00\x00") + +func templateDialectSqlQueryTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectSqlQueryTmpl, + "template/dialect/sql/query.tmpl", + ) +} + +func templateDialectSqlQueryTmpl() (*asset, error) { + bytes, err := templateDialectSqlQueryTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/sql/query.tmpl", size: 2517, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateDialectSqlUpdateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5b\x6f\xdb\x3a\x12\x7e\x96\x7f\xc5\xac\x91\xf6\x48\x81\xab\xa4\x7d\xcc\x59\x17\xe8\x36\x29\x10\xec\xb6\x6e\xe3\x9e\xdd\x87\x9c\xa0\xa0\xa5\x51\x42\x44\xa6\x1c\x92\x76\x92\x35\xf4\xdf\x17\x43\xea\x42\xc9\x72\x6a\xc7\x41\x9b\x83\xee\x43\xd1\x58\xe2\x65\x2e\xdf\x7c\x33\x43\x71\xb9\x84\x18\x13\x2e\x10\xfa\x31\x67\x29\x46\xfa\x40\xdd\xa4\x07\xf3\x59\xcc\x34\xf6\x21\xcf\x7b\xcb\x25\xec\xcd\xae\x2f\xe1\x68\x08\x7b\xe1\x38\xca\x66\x18\x7e\x66\xd1\x35\xbb\xc4\xf2\xed\x64\xce\xd3\x18\x25\x8d\x98\x31\x15\xb1\xb4\x1a\xf8\x8f\xe2\x4d\x31\x50\x62\x84\x7c\x61\x47\x56\x7f\x57\xd3\x8b\x41\x99\x40\x7a\x7f\xc5\xd4\x78\x9e\x24\xfc\xae\x1e\xd0\x1f\x89\x52\xa4\x57\xb0\xf7\x5f\x94\x19\x0d\x3c\x84\x3c\x5f\x2e\x81\x27\x76\xaa\xf9\x61\x5f\x0e\xa1\x2f\x78\xda\xb7\x8f\x50\xc4\xd5\x54\x89\x9a\x66\xf6\x45\xbf\x6b\x2e\xbd\x25\x5d\xcf\x4a\x09\xdd\xf9\xbd\x64\x2e\x22\xf0\x1b\xda\xe4\x39\xec\xbb\x76\xc8\xf3\x00\xd4\x4d\x3a\x66\x0b\xf4\x23\x7d\x07\x51\x26\x34\xde\xe9\xf0\xbd\xfd\x3f\x28\xa7\x6b\x9a\xd9\xd8\xde\x2c\x13\x7e\x62\xd3\x42\x16\x4c\x15\xfd\xc5\x85\xae\x24\x18\x00\x4a\x49\xff\x32\x19\xc0\xb2\xe7\x29\x24\xa7\x65\xc6\xa6\xea\x26\x0d\xc7\xe6\xb7\xd9\xc1\xf1\x53\xd8\xd8\xe6\x7d\x96\xce\xa7\x42\x85\x61\x58\xef\x61\x26\x9c\x1e\x93\x90\x4a\x33\xa1\x5d\xad\x83\xf0\x83\xcc\xa6\x3e\x2d\xff\x95\x4d\x52\x5c\x59\xdd\x3c\x0d\x82\x9e\x47\xe6\xad\xf7\xe9\x79\x5e\x7b\xe4\xe9\x71\xdb\x76\x21\x8f\x83\x70\xfc\xe5\x5f\x7e\xa9\x49\xb1\x4c\x21\x57\xcf\xf3\x92\x4c\xc2\xb7\x01\xcc\x0c\x6c\x98\xb8\x44\x68\x2f\x31\x93\x18\xf3\x88\x69\x54\x64\x12\xcf\x9b\xb5\x17\xf4\xf2\x62\x51\xeb\x45\x4f\x66\xb7\x8a\x96\x7b\x49\x3a\x9d\x65\xb7\x6a\x99\xf7\xbc\x9b\x39\xca\xfb\x01\x30\x79\x69\xde\x95\xd3\xc3\x2f\xf4\xdc\x0f\x7a\x1e\x4f\x8c\xf1\x87\x2b\xfb\xc7\x92\xfe\x2a\x06\x46\xfa\x6e\x00\xce\x5a\x03\xa0\xdd\x82\xdf\xcd\xdc\xbf\x0d\x41\xf0\xd4\x48\x29\x51\xcf\xa5\x80\x0a\xae\x85\x6f\x7b\x24\x6b\x8c\x09\x4a\x33\x2f\x7c\x9f\x66\x0a\x69\xf7\x05\x93\xc0\x63\x05\xe7\x17\x5c\xe8\x9e
\xb1\x8a\x19\xf0\x09\xef\xb4\x6f\xb0\x50\x0c\x01\xf3\x7e\xd5\x17\xd6\x19\x0e\xac\x61\x08\x2f\x1b\x88\x8b\x32\x91\xf0\xcb\xa3\x15\xfd\xec\x73\xb3\x46\x61\x83\x23\x6b\x04\x77\x35\x03\x12\xb2\xa5\xdf\xad\x6f\xb7\xc6\xc9\x54\x87\x27\x84\xe6\xc4\xef\x97\x54\x93\xe7\x47\x90\x30\x9e\x62\x0c\x2a\x62\x42\x70\x71\x49\xaa\x92\x5e\x19\xb8\x02\x1f\xc1\x8b\x45\xdf\x58\x8d\x7c\xec\x59\x01\x63\xe8\x90\xad\x34\x06\x41\xfc\x54\x8d\xb5\xa4\x35\xf3\x9c\xc7\x7e\x50\xc7\xc0\xe9\x71\x1d\xe8\x5e\x0b\x84\x8e\xe2\xc6\xea\xe3\x88\x09\xff\x25\x8f\x9f\x48\x4f\x89\x2c\x26\x91\x78\xdc\xa1\x93\x8b\x5c\x8f\x20\x30\x04\x36\x9b\xa1\x88\x7d\x1e\xab\x01\xf0\x38\xe8\x95\x00\x6f\x38\x5c\xdd\x72\x1d\x5d\x81\x20\xa1\x53\x14\x34\x3a\xf8\xdd\xc8\x18\x31\x85\x20\x60\x38\x84\xc3\xa3\xde\x96\x12\xbb\x0e\x00\x91\x69\x48\xb2\xb9\x88\xe1\x96\xeb\xab\x5a\xfc\x8e\x20\xaf\xb7\x7d\x0b\xaf\xb7\xde\x75\x9a\x49\x04\x7d\xc5\x04\x90\x7a\x0d\x19\xcc\xce\xfa\x0a\x41\xd1\x83\xef\x88\x90\xb7\xd9\x85\x27\x95\x6d\x8c\x3d\xac\x0f\xbb\x44\x13\x3c\x5d\x65\x92\xe5\xf2\x60\x9f\xac\xae\xaf\x50\xe2\x6f\x0a\x54\x36\x45\x7d\x45\xae\xd4\x19\xd8\x34\x3a\x00\xa5\x99\xd4\xc0\x40\x4b\x26\x14\x8b\x34\xcf\x44\x08\xfb\x07\x34\x9f\xc8\xc2\x09\xa8\x0e\x56\xf9\x7a\x47\x94\x52\xd3\xcf\x86\x14\x52\x21\xfe\x03\xc7\x34\x56\x56\x59\x22\x08\x9f\xf4\xb3\x92\xc1\x24\xcb\x52\xab\xae\x32\xd9\xe3\x0c\xd5\x3c\x25\xf2\xf0\xca\x64\x66\xb3\xca\x1f\x66\xf8\x1a\xde\x0f\xff\x43\xba\x9b\xf4\x70\x2a\x4e\x85\x56\x5d\xd9\xa7\x9d\x5c\x08\xb6\x94\x81\x28\x6b\x78\x41\x01\x72\xcb\xee\x7b\xdf\x06\xb0\x97\xd8\x82\xc3\x15\x9e\x0c\xd0\xb6\x11\xfd\x4e\xc2\xb1\x96\xf3\x48\x9b\xb1\x04\x88\x46\x30\x16\x9a\x0e\x41\xcb\x39\x9a\x27\x85\x6a\xe1\x18\x3b\xf3\xe4\x5e\xd2\x94\x73\x7f\x93\x3d\x4d\xac\x76\x50\xae\x21\x5d\x7a\x94\x84\x9f\xe6\x69\x4a\xf6\x2a\xf2\xbc\x05\x50\x07\x51\x55\x45\x54\x52\x21\x7c\xb8\x99\x10\x95\x0c\x15\x59\x74\xd0\x47\x52\xa0\xd2\x9a\xa7\x95\xf3\x4a\xd3\x54\x29\xcf\xe5\x3d\x7d\x17\x9e\xdc\x61\xd4\x91\xe0\x5e\x4a\xdc\x98\xf0\x65\x96\xa6\x13\x16\x5d\xfb\x05\xf2\x2b\x96
\x73\x23\xd3\x00\xf7\x24\xbe\x44\x65\x6c\x74\xb0\x0f\x28\xb4\x09\xf4\x6c\xae\x21\x31\xa0\x18\xc0\x64\x6e\x9f\x01\x9a\x91\x36\xa0\x0c\xc6\xdb\x78\x6e\x46\x6c\x03\x68\x68\x81\x56\x6e\xe6\x24\x4e\x0c\x3f\xbe\xf9\x58\x43\x8f\x58\xa2\xed\x05\x89\xd3\x6c\x81\xb1\xe3\x34\x2c\x9d\x16\xc0\xdb\x92\x4c\xcc\x8a\x7b\xcc\xa9\x56\xf7\x26\xf4\xe3\x75\x5d\x7e\x62\x78\xaa\x4e\xc5\x02\x65\x55\x8c\x31\xa8\x06\xec\x4d\xa0\x9a\x59\xbb\xd6\x43\x4a\x05\x47\x43\x98\xb2\x6b\xf4\x4d\x55\x30\xd8\x5a\x48\x8b\x5b\xaa\x25\x90\xc7\xeb\x0b\xac\x07\x96\x28\x54\x34\x3a\x6a\x9c\xce\x52\xc2\x56\x47\x33\x71\x10\x65\xa4\x9f\xe6\x71\x9f\x8c\xfe\xaa\x8c\x0f\x6c\x24\x34\x34\x19\x0d\x2d\x57\x17\x19\x7d\xa5\x30\xbb\x49\xc3\x63\x4c\xb1\x83\x90\xe8\x37\x5a\x5a\x72\x62\x38\x08\xed\x56\x1b\xf1\x14\x86\x9f\xff\xe9\xcc\x3d\xb7\xbe\xc8\xf3\x8b\x9a\xb1\x76\x5d\x6e\x62\x97\xc3\xd6\x7a\x4e\xd0\xed\x14\x75\x9b\x87\x9d\x97\x37\x68\x0b\xa9\x7b\x48\xce\x30\x29\x83\x8e\xf0\x6f\x02\x4c\x61\x9a\x80\xa4\xa2\x14\x45\x84\x26\xd2\xcc\x80\xaf\xa3\xe3\xd1\x11\xcc\x15\xc2\xe8\x0c\xb8\x30\x59\xd8\x24\x79\x36\xc9\x16\x58\xe6\xb8\xb6\x0f\x77\x70\xe1\xce\x46\x6f\xd9\x7c\x67\x4c\xb4\x9d\xd8\xf0\xe2\x8e\x7e\xdc\xc6\x93\xae\x2f\x3b\xe8\xbf\x24\x55\x0c\x47\x4f\xc4\x69\xbf\x32\xfb\xac\x29\x87\x1e\x86\xee\x18\x35\x95\x00\x6b\x66\xd9\x9e\xfc\xf1\xa4\xb5\x32\x7f\x33\xba\xc2\xf0\xeb\x7d\xe3\x44\x27\x74\x9e\xb6\x2b\xb6\xe7\x42\x58\xad\xee\xcc\x30\xd1\xe8\xcd\x08\x32\x09\x1f\xdf\x8c\x2a\xd2\xe9\x28\x17\xa3\x14\x99\x7c\x10\x49\xcf\xd1\xdb\x5b\x39\xe9\x99\xf8\x68\x7d\x52\x21\x4f\xad\x4b\x2a\xeb\x72\xc5\xa3\x5c\xf0\x58\x1f\x3c\x5d\xc8\x35\x93\xc1\x6e\xb9\x60\x8b\x54\xb0\x69\x23\xd0\x45\xd7\x56\xb5\x56\x73\x53\xd3\x7e\xe1\x55\x26\x62\x1a\xf6\x87\xe0\x37\x73\x7c\x8c\x8f\x79\xd2\x3e\x96\x30\xc7\x02\x1b\x83\x6d\xdd\x31\x41\xc4\xc4\x6f\x1a\x52\x2e\xae\x8d\x0c\xd4\x18\xc0\x9f\x7d\xab\x54\x11\xe8\x7f\xf6\xa9\x35\x7f\x11\x83\xc9\x07\x11\x2a\xf0\xdf\xc2\xeb\xa0\x3f\x00\x11\xb8\x00\xfe\x6e\x5a\xea\x32\xd5\xae\xf9\xe8\xa9\xe8\xc7\x80\x7f\x73\xdc\x52\xca\xab\x66
\xd6\xf0\x3f\xf9\xb2\x71\x2b\x7f\x7e\x78\x11\x04\xe1\xcf\xc4\xfb\x93\xd1\xc6\x76\xa6\x2b\x74\x5f\x6f\xbd\x6d\x53\x6c\x10\xbe\x13\xb1\x1f\x84\xa7\x6a\x2b\xf2\xfa\xc9\xc6\x67\x49\x82\x91\xc6\xb8\x3a\xcd\x92\xa8\xcc\xc9\xfa\xbb\xe2\x45\x4b\xb0\x9d\x37\xe4\x09\x70\xa1\xfd\x72\xdf\x00\xfe\xbe\x05\x9f\x6d\xbc\xed\xcb\x13\x29\x8d\x95\x25\xe3\x42\x7f\x30\x87\xb5\xcb\xa9\xba\x3c\x32\x0c\x34\x9e\x49\x2e\x74\xe2\xf7\x57\x29\xc6\x7f\xb1\x08\x80\xa5\x12\x59\x7c\x0f\x51\x26\x84\x91\x92\x98\x87\x41\xcc\x13\x43\x88\xba\xa0\xa6\x7a\x5a\xdf\x22\x20\x6f\xa8\x9a\x97\xc4\xeb\x54\xf1\xd4\x99\x95\xe7\x00\x0b\x96\xce\xd1\xad\xc5\x8b\x6a\xfc\x70\x50\xd1\x6b\x5d\x68\x7f\x23\xc4\xd6\x9c\x46\xe5\x6f\x61\x8c\x9d\xf8\xee\xd1\x84\x57\x4a\x5f\xd5\xe0\xf6\xf7\xc0\x7e\xe6\x58\xf2\xd8\x58\x24\x1f\x40\x57\x3d\x61\xc7\x20\x0d\xe2\xb1\xf3\xbd\xca\xe4\x20\x4a\x3d\xaf\x24\x26\x10\x49\x34\x9f\x86\xa8\x39\xb5\x27\x45\x5c\xc0\x24\xd3\x57\x70\xcb\xee\x95\xdb\xa4\xe6\xad\xfa\xe5\xe9\xcf\x6c\x9c\x6f\xa5\xb6\xbe\x50\x28\xd7\x91\xcd\x3a\x82\x2a\x3e\xdf\x6d\xdb\xa9\x6e\xd5\x28\x37\x00\xb3\xa8\x31\x51\x78\xab\xf0\x7a\x79\x5c\xf8\x6f\xf3\xd4\x5f\x9c\x1f\x5e\x0c\x60\x71\xfe\xfa\xe2\x81\xfe\xa9\xe3\x88\xf1\x87\x16\xa6\x2b\x81\x34\x2a\x7d\xf3\xd7\x4e\xf8\x8f\xce\xf7\xbb\x9d\xde\x3f\x87\x4a\xb7\xcb\xaf\xf5\x31\xc7\x77\x68\x6f\x56\x9a\xfd\x73\x29\xfd\x4f\x22\xc2\x59\x38\x92\x7e\xf0\xe8\xaa\x61\x6d\x29\xf4\xe3\x50\xd5\x09\x2a\x2a\x66\x66\x03\xcb\x76\x5b\x56\x34\xcf\x02\x5c\xbf\x78\x65\x93\x09\x84\x2c\xe9\xe8\xa1\x5e\x2c\x1e\x55\xde\x5c\xe3\xbd\xda\x4c\x95\x07\xab\x20\xa7\xcf\xac\xd2\xf7\xc3\x71\x5e\x7d\x0f\x2d\x23\xc8\xb9\x06\x50\xd8\xcc\x14\x12\xaa\xf0\xb2\xd2\x92\x22\x35\x7c\xa7\x33\xee\x6f\x2e\x35\xf5\x02\xf5\xd9\xaf\xda\xf4\x50\xd7\x7c\xb5\x75\x41\xd0\xba\x7d\x60\xa5\x23\xb9\xb6\x12\xc5\x59\xab\xae\x41\x9c\xa3\x19\xb7\x80\x29\xb6\xf9\xab\x76\xa0\x8f\x6e\x9b\xca\x4d\x1b\x24\xb3\x6a\xfe\x9f\x64\x97\x07\xcd\xf2\xc3\x5a\xcb\xf5\x36\x72\x60\xf5\x7f\x62\xfe\xc5\x89\xb9\xc4\x42\xeb\x8a\x4c\x75\x6d\x4e\xdf\x85\xef\xb3\xe9\x94
\x6b\x7f\xbb\xdb\x70\xf5\xbb\xe6\x4d\xcd\xf6\x15\x4d\x1b\xaf\x65\xcf\xeb\x5c\x99\x14\x3c\xed\xe5\xbd\x9e\x73\x8d\xf3\xc1\x7b\xaf\x6e\x89\x96\xbb\x57\x69\xba\x53\xc7\xfa\xb4\x51\xd4\x65\x5d\x89\xc0\x5a\x44\x15\xbc\x5f\x69\x78\xb0\x0f\xc5\xdf\x5c\x99\xcb\x55\xd7\xe2\x36\x13\xc0\x34\xe8\x2b\xae\x60\x96\x71\xa1\xab\x8e\xb5\x69\xe7\x4a\xbb\xff\x05\x00\x00\xff\xff\x3e\x38\xc8\xfd\xd4\x2b\x00\x00") + +func templateDialectSqlUpdateTmplBytes() ([]byte, error) { + return bindataRead( + _templateDialectSqlUpdateTmpl, + "template/dialect/sql/update.tmpl", + ) +} + +func templateDialectSqlUpdateTmpl() (*asset, error) { + bytes, err := templateDialectSqlUpdateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/dialect/sql/update.tmpl", size: 11220, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateEntTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x4b\x8f\xe3\xb8\xf1\x3f\x5b\x9f\xa2\x56\xf0\x2c\x24\xc3\x2d\xef\xe1\x8f\xff\xa1\x17\x3e\x24\xe3\x2c\xe0\x43\x66\x93\x79\x6c\x0e\x83\xc1\x0c\x5b\x2a\xd9\x4c\x4b\xa4\x87\xa4\xdc\xdd\x50\xf4\xdd\x83\x22\x25\x99\x92\x35\x6d\xcf\x74\x80\x5c\x72\xb2\x45\x16\xeb\xf1\xab\x27\x59\xd7\x90\x61\xce\x05\x42\x58\xca\x0c\x8b\x10\x9a\x26\x08\xea\x1a\xe6\x87\xfb\x1d\xdc\xae\xe1\x8e\x69\x84\x79\xf2\x5a\x8a\x9c\xef\x92\xbf\xb1\xf4\x9e\xed\x90\x88\xea\x1a\x0c\x96\x87\x82\x19\x84\x70\x8f\x2c\x43\x15\xba\x53\x2d\x87\xd3\x2e\x2f\x0f\x52\x99\x10\xe6\x76\x6b\xb5\x02\xe2\x9f\xbc\x61\x25\x31\x02\xae\xc1\xec\x11\xac\x78\x40\x61\xb8\x79\x82\x5c\x2a\xbb\x38\x20\xd4\xe9\x1e\x4b\x96\x04\xe6\xe9\x30\xde\x31\xaa\x4a\x0d\xd4\xc1\x2c\xb5\x7a\x06\xb3\xd5\x0a\xb6\x1b\x90\xb9\xe5\x82\xc2\x24\xc1\x6c\xbb\x71\x87\xb6\x9b\xe4\x3d\x71\x68\x1a\xf8\xf2\x4f\x2d\xc5\x6d\xc8\xb3\xa5\x2c\x39\xe9\x6b\x9e\xc2\x2f\xc1\xac\xae\x41\x31\xb1\x43\x98\x7f\x5e\xc2\x3c\x27\x18\xe6\xc9\x6f\x1c\x8b\x4c\xc3\x4d\xd3\x04\xb3\x99\xb3\xe1\xc0\x74\xca\x0a\x98\xe
7\xbd\x22\x7b\x49\x34\x24\xf3\xc8\x8a\x0a\x3b\x05\x42\x12\xdc\x53\x85\x90\x13\xaf\x24\x00\x00\x98\x4d\xf2\xa9\x6b\xe0\xb9\x5d\xa8\x8a\x82\xdd\x15\xb4\xb8\xa8\x6b\x40\x91\x41\xd3\x38\x6e\xbd\x11\xee\xf3\x9d\xc5\xe0\x3d\x23\xfc\x9d\x0d\x44\x6c\xd5\x1d\xda\x83\xce\x9e\xbf\x64\x3b\xec\xcc\xa9\xeb\xd5\x02\xf8\x4e\x48\x85\xb0\x43\x81\x8a\x19\x2e\x76\x80\xd9\x0e\x9d\xae\x1a\x16\xab\x96\xf2\x06\x1e\xb8\xd9\xc3\x1c\x3d\x89\x8e\xcb\x08\x15\xbc\x84\x0a\xe9\x7d\x22\x22\x61\x09\xbc\xef\x89\x34\x1a\x30\x12\x04\x2f\x96\xc0\x44\x06\x7a\x2f\xab\x22\x83\x3b\x84\xea\x90\x31\x83\x19\x94\x4c\x54\xac\x28\x9e\x12\x92\x3d\x29\xd8\xc1\x28\xa4\xa1\xc5\x0f\x82\x7f\xad\x68\xf9\xe3\xa7\x1e\xc9\x85\xd3\x81\xa0\xec\x0f\x7d\x71\x6b\x23\x3c\xc7\x80\x76\xff\xdb\x6c\x51\x98\x22\x3f\xa2\x72\xd8\xbe\xed\xbe\xda\x78\xff\x4d\xc9\xf2\x2d\xea\x83\x14\x1a\x41\xa7\x4c\x38\x34\x76\x0a\xcb\x82\x0b\x50\xdd\x56\xc6\x0c\x03\x2e\x8c\x1c\x84\x77\x12\xe4\x95\x48\x21\x1a\xc8\x69\x1a\x58\xf8\x44\xf1\x40\x48\xa4\x50\xc3\xa2\xe5\x9f\x74\xab\x31\xa0\x52\x52\x51\x9e\x1c\x4b\x76\x58\xd2\x27\x29\xac\x50\x27\x6f\x91\x65\x7f\x10\xf0\x7f\x65\x87\x28\x0e\x66\x3c\xb7\xbb\x3f\xad\xc9\x03\x74\x62\xa6\xd0\x54\x4a\xd0\x6a\x30\xb3\x18\xdc\xc0\x9c\x6c\x21\x0e\x07\xc5\x85\x81\xf0\x18\x0e\x34\x0c\x66\x47\xa6\xac\x29\x96\x6e\x90\xa8\x94\x8e\x70\x96\x90\xdf\xca\xc8\x67\x53\xd2\x05\x1f\xe9\xf3\x4c\x22\x6d\xf5\x7b\x6e\x97\xb8\x30\xff\xff\x7f\xe4\xc0\x42\xa3\xcb\xa5\x6b\x33\xad\x5f\xed\xb4\x1c\x64\xf5\xb9\xc2\x8e\xd8\x82\xd5\xa2\x79\xbb\x06\x02\x3e\xd9\x60\x2a\x33\x8c\x7e\xf6\xa0\x89\x7f\xbd\x88\xf7\x00\xdb\x64\xbb\x81\xb5\x8f\x6d\xb2\xdd\x3c\x5f\xb9\xfa\xfc\x1d\xb0\x99\x2c\x3f\xc4\xf8\x66\x0c\x9c\xe1\xa5\x4d\xa2\xc7\xc8\x97\x3a\x09\xfb\x12\x7e\x89\xc1\xc7\xf8\xe6\xc2\x01\x0f\x2d\x3f\xcf\x5a\x08\x04\x2f\x02\x2f\x91\xe4\x83\xf6\x92\x48\x7f\x2d\xfe\x93\x09\x24\x1f\x74\xa4\x48\xc2\x42\x7f\x2d\x12\xfa\xf4\xb2\xe6\x64\xc8\x8b\x42\x9e\x80\xa5\xa8\xdf\xea\x77\x46\x51\xa1\xb5\x51\x39\x88\x49\x3f\x2b\x3c\x70\x5e\x98\x07\x52\x0d\x22\x7d\x9e\x27\xbf\x1
f\x0c\x97\x82\x15\x7d\xac\xd3\xa6\x27\xf7\xa4\xd0\x79\x1a\x9c\x45\xf9\x6a\x65\x3d\x22\x55\x86\x0a\xf6\xa8\xd0\x2b\xda\xd6\x55\xa4\x0c\xd3\xc0\x85\xfd\xb4\x85\xd6\x1b\x29\x92\xd7\xb2\xa8\x4a\xa1\xbf\x24\x7e\xc2\x90\x33\x92\x77\x29\x13\x11\x59\xf7\xf3\x30\xe2\x97\x9d\xc5\x17\x40\x19\x9e\x9b\x8e\xd9\x8e\x55\x67\xd0\x0f\x67\xe4\x84\x7b\xb5\x51\xa9\x14\xc7\x64\x6b\x24\x8b\x86\x26\xc4\x43\x98\x4f\x1b\x83\x9c\xb8\x64\xe1\x29\x5f\xbd\x3a\xd6\x59\x34\x4a\x64\x0b\xc8\x58\xf9\x6f\xd4\x81\x11\x6e\xe7\x34\x3d\x6a\xce\x04\xcb\x9b\xe7\x70\x11\xee\xe4\x0f\x56\xf0\xcc\xc2\xfa\x7d\xda\xd8\xbc\xcb\x21\x7c\xa5\x93\x57\x3a\x6c\xa5\x44\x43\xea\x18\xfe\xe5\xc7\xb2\x05\xaa\x53\xae\x19\xfb\xf9\xa4\xbe\xc3\xc9\x4b\x89\xe0\x7a\xd5\x5e\xac\xd9\x08\xc4\xeb\xc5\x5e\x72\x8f\x6f\xab\xff\x7f\x58\x5c\x9f\x99\x11\xdb\xaa\x3c\xb7\x45\xf4\xd4\xec\xff\x5e\xa1\x7a\x0a\x4f\x06\x62\x6b\x60\xe3\x2a\x41\x7f\xa2\x69\xe0\x6b\x85\x8a\xa3\xfe\xc6\xd4\xe7\xcf\x83\xa7\x92\x3d\xbb\xaa\x66\x7b\x52\xa2\x18\xc6\xe3\x5c\xd3\x58\x25\xfd\xe4\x75\x4d\xb7\x63\xf0\xba\xe0\x28\x4c\x0d\x63\xb4\xdd\x35\xa2\x89\x13\x9f\xff\x88\x28\xa6\x22\x70\x4a\x52\x6a\x4f\x1f\xec\x64\x0a\x4e\x94\x06\x06\x77\x15\x2f\xa8\x18\xd2\x7d\xc6\x8e\xad\x54\x0e\xcc\x9e\xeb\x91\xb1\xab\x15\xbc\x91\x86\xaa\x24\x33\x4b\x78\x92\x15\x08\xc4\x8c\xe6\xdf\x94\x15\xc5\x90\xf8\x83\x78\x50\x34\xa3\xc1\x1d\xe6\x34\xb0\x13\x45\xcf\xb6\x44\xb3\x97\xd9\x92\x42\xf9\x4c\x0c\x49\x79\x60\xba\x55\x0f\x33\xc8\x95\x2c\x81\x81\x51\x4c\x68\x96\x52\xd4\xbb\x51\x9b\x9c\xe1\x2d\xda\x43\xa9\x2c\x4b\x6e\x68\xec\x96\x0a\x94\x2c\x0a\xcc\xe0\x8e\xa5\xf7\x57\xf6\x56\x87\x4c\xe7\xa2\x6e\xdd\xad\xfe\x2e\x90\x3c\xf4\x63\x0e\xea\x59\x9c\xbb\xa7\xf5\x89\x85\x0b\x2a\xfb\xa3\xbb\xfb\x20\x5d\x32\x09\xec\x4b\x80\x00\xcb\x0d\x2a\xe0\x8e\x30\x2d\xa4\xc6\x6c\x49\x6c\xb5\x74\xe7\xc9\x3d\x02\x1f\x4d\x1f\xe3\x0f\xbc\x28\xa8\xe1\xe1\x23\xa6\x15\xe1\x65\xf6\x4a\x56\xbb\xbd\x95\x9c\x29\xab\xdd\xc3\x9e\xa7\x7b\x48\x15\x32\x47\x30\x80\xfb\x5a\x44\xbb\x30\x18\xac\x13\x90\xe6\x71\x09\xf2\x9e\x32\x75\x1a\xb
5\xc4\x69\x91\x44\x0b\xf3\xb8\xb1\x7f\xdd\xbc\xff\x93\xbc\xb7\x99\x72\x60\x82\xa7\x51\xd8\x3d\x02\x34\xcd\xed\xd9\x85\x9d\xae\x54\x03\x9c\x58\x77\x75\x0f\xe3\xc9\xde\x38\x90\x0c\x6b\x30\x8f\x49\xa6\x8e\xbd\xd3\x47\xe4\xad\xeb\xda\xf6\xc9\xcb\x43\x81\x25\x0a\xe3\xbc\x97\x97\x26\x71\x3b\xa8\xae\xc4\xca\x91\x47\x31\x0d\x63\xc4\xb1\x0e\x66\x77\x95\xed\xa2\x77\x4f\x06\x75\xf2\x06\x1f\xfe\x5c\xe5\x39\xaa\x48\xf0\x22\xb6\x9b\xc9\x3f\x14\x37\xd8\x1e\x0c\x7d\x76\x51\x38\x41\x61\x95\x72\x0d\x20\x0a\x79\xb6\x7e\x75\x5c\x86\xcb\x33\xfc\xb7\x9b\x38\x1e\x74\x73\xfe\x6c\x37\xdf\x19\x98\x73\xf8\xa5\xed\x08\x67\x4a\x2d\x81\x34\x39\xef\x65\x93\x73\x00\xcf\xe1\x38\x15\x12\x53\x4d\xe3\x57\x38\xfa\x63\xcf\xb9\xe8\x81\xb5\x83\x5b\xd0\xfa\xd5\x31\x5c\xc2\xe2\x48\x76\xba\x5e\x3b\xea\x6b\xdf\xcb\xea\x1a\x7d\xe3\x31\x0c\xfe\xff\x33\xd8\x62\x42\xad\x8d\x3b\xda\xec\x82\xc3\x75\xc1\x9b\xa9\xf1\x8d\x82\x91\x67\x7d\x5d\xa7\x28\xa4\x16\xa8\xf0\xa0\x50\xa3\x30\xcc\x96\x8a\xb6\x8d\x6d\x37\xdd\xfb\xce\x55\xb1\xc9\xb3\x28\xb6\xdc\xea\x60\xc6\xb3\x25\x7c\x26\x2f\x75\xf3\xe2\x9f\x8c\xe4\x63\x06\x14\x45\xbd\x01\x3c\x0b\x9a\xc0\x33\xd7\x3e\x46\xe8\x82\xa7\xb6\x83\x1f\x8a\x4a\x11\x52\x7e\x07\x38\x11\xb8\x54\x66\x70\x60\x4a\xdb\x48\x71\xcb\x32\x1f\x35\xa7\xfe\xbd\xad\x3f\xf6\xf1\xd3\xc0\x88\x17\xbc\x71\x74\x2c\x9f\x05\xab\x23\xfa\x2f\xbe\x71\x5c\x7f\xdf\xfb\xf8\xe9\x7f\x8f\x1c\x3f\xfc\xc8\x41\x73\xd2\xe7\xa5\xab\x54\x0e\x0a\x1f\x5a\x3a\xb0\x18\x47\xc8\x1a\xd8\xe1\x80\x22\x8b\xc6\x3b\x4b\x18\x4c\x12\xb6\x92\x6d\x37\xb7\x70\x3c\xdd\x1b\x2f\x5e\x1b\x27\xf1\xbe\x7d\xfe\x75\xe4\x38\x59\xa3\x46\x4f\x22\xd3\x44\x3d\x9e\x9d\x7e\xfd\x4b\xc8\xcc\x8d\x9b\x2f\x7d\x11\xf9\x91\x74\x7b\xf6\x45\x24\xb7\xd3\xe0\x03\xb5\xd0\x47\x13\xc5\xd6\x45\xd7\x64\xcd\xcc\xf7\xeb\xed\x7a\xe4\x2a\x22\x38\x45\x93\x7f\xc7\x19\x68\x74\x1e\x4e\x83\x78\xb2\xed\xe7\x7b\xe2\xc5\x8f\xd4\x09\xb4\xa7\x21\x1b\x22\xe6\xe6\x9c\x28\xcd\x77\xed\xdf\xb8\x43\x89\x0f\x63\x7a\xc0\x22\x38\xbb\xef\x7d\xe4\x9f\xda\x99\x09\xd6\x90\xe6\x3b\x52\xc7\xbf\x6d\xfc\x3
b\x00\x00\xff\xff\xef\xee\x8f\x73\xb8\x19\x00\x00") + +func templateEntTmplBytes() ([]byte, error) { + return bindataRead( + _templateEntTmpl, + "template/ent.tmpl", + ) +} + +func templateEntTmpl() (*asset, error) { + bytes, err := templateEntTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/ent.tmpl", size: 6584, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateExampleTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x56\x4d\x6f\xe3\x36\x10\x3d\x8b\xbf\x62\x4a\x28\xad\xb4\x70\x24\xb4\xa7\x85\x17\x3e\xb4\x59\x67\x61\xa0\x70\xd2\xdd\x14\xe8\x2d\x60\xa4\x91\x42\x84\x26\xb5\x14\xe5\x38\x50\xf5\xdf\x8b\x21\x65\xcb\x5e\x27\x45\x81\xbd\xf4\x62\x58\xa3\xf9\x78\x33\xf3\xde\xd8\x7d\x0f\x25\x56\x52\x23\x70\xdc\x89\x4d\xa3\x90\xc3\x30\xb0\xbe\x87\xb8\x79\xaa\x61\xbe\x80\x07\xd1\x22\xc4\xd9\x95\xd1\x95\xac\xb3\x5b\x51\x3c\x89\x1a\x47\x1f\x87\x9b\x46\x09\x87\xc0\x1f\x51\x94\x68\x79\x88\x1a\x06\xc6\xe4\xa6\x31\xd6\x41\xc2\x22\xae\x4c\xcd\x59\xc4\x1d\xb6\x4e\x6a\xff\xd5\xb4\xf4\xa9\xd1\xe5\x9d\x55\x9c\xb1\xa8\xef\xc1\x0a\x5d\x23\xc4\xf7\x33\x88\x35\xd5\x8d\xb3\xb5\x29\xb1\xa5\x64\x51\xc4\x09\x90\x3e\x07\x91\x07\xfb\x64\xe0\x94\xeb\x12\x50\x97\x1e\x45\xc4\xab\x87\x22\x47\xed\xf2\x52\x0a\x85\x85\xe3\xa3\x49\xc9\x87\xbc\x36\x79\x6d\x71\xa3\xa4\xe6\x2c\x65\xbe\x67\xd4\x5b\xaa\xdd\x35\x0d\xda\xd0\xcb\xdf\xd0\x58\xa9\x5d\x05\xfc\xa2\xbd\x5f\xad\xef\x96\x9f\x3e\xff\x7a\xb7\xba\x59\xdf\x2f\xd7\x1f\x6f\x6f\x56\xeb\x3b\x3f\x2f\x96\xe7\x54\xb3\x31\x52\x3b\xa8\x8c\x05\xf7\x88\x50\x0a\x27\x68\x7a\x19\xac\x34\x18\x5b\xa2\x05\x67\xc0\x76\xda\xbf\xa5\x71\xb4\xa0\x4c\x21\x94\x7a\x99\x1d\xcc\x95\x51\xca\x3c\x4b\x5d\x43\x61\x36\x1b\xa1\xcb\x39\xcb\x73\x96\xe7\x11\xec\xf1\x0d\xc3\x82\x3f\x3a\xd7\xcc\xf3\xdc\x47\x3f\x9a\xd6\xcd\xdf\xff\xfc\xfe\x17\x0e\xb5\xf1\x69\xe1\x72\x4b\x41\x5b\x61\x27\x50\xef\xc6\x56\xb3\xe5\x68\x61\xac\xea\x74\x01\x52\x4b\x97\xa4\xd0\xb3\
x48\x56\x80\x33\x30\x4f\x34\x00\xd3\x66\xbf\x1b\xf3\xd4\x35\x4b\xbd\x4d\xf8\x54\x99\xa7\x1f\xc8\xa3\x67\x11\xb9\x77\x33\x40\x6b\xfd\xc0\xac\xca\x6e\x85\x6d\x31\xc1\xf4\x83\x37\x2e\x16\xa0\xa5\xf2\x9e\xd1\x01\xc4\x02\x7e\xfc\x16\x46\xdf\xd1\x7e\x07\x16\x0d\x6c\xf0\x3b\x78\x8b\x07\x97\xc3\x10\x10\x2f\x03\x4f\xfb\x1e\x1a\xd1\x16\x42\x11\x01\xd6\x62\x43\xdb\x9f\x1a\x39\x54\x9c\x60\x58\x74\x9d\xd5\x54\x28\x2a\xdc\x8e\x52\x17\x46\x3b\xdc\xb9\xec\x37\x51\x3c\xd5\xd6\x74\xba\x4c\x52\x16\x15\x46\xeb\x43\x63\x7b\xb8\x6b\x7c\xbe\x52\x12\xb5\x4b\xf6\x96\xc0\xc5\x7e\xdf\xc7\x1c\xde\xed\x8b\x0e\x69\xc0\x60\x2d\xfc\x30\x95\x57\xa6\xce\xae\x85\x13\xaa\x4a\x78\x25\xa4\xc2\x12\x0a\x8b\x82\x24\x71\x60\x0a\x14\xbe\xc6\x1c\x2e\xb6\xdc\x43\x48\x03\x5c\x6f\x25\x38\x13\x8c\x8f\x56\x6e\xd1\x26\x23\xad\x09\xdf\xa7\x00\x2c\x21\xfc\x69\x9a\x32\x16\xe5\xf9\x54\x62\x8b\xd6\xc9\x02\xdb\x03\x3b\xfb\x1e\x94\x79\x26\x9a\x1f\xc6\xf7\x53\x0b\x58\xd6\xd8\x66\xc7\x82\x94\x33\x88\xd1\x2f\x42\x67\x4b\x7a\x1b\x14\x49\x2a\x93\x15\x68\xe3\x20\xc6\x6c\xd5\xae\xf4\x16\x6d\x8b\xe1\xa5\x7f\x1b\x7b\x29\x4d\xea\xb9\x28\x39\xb9\xde\xbd\x34\x98\x7d\xc6\x02\x09\x3f\xc4\xf2\x34\x62\x18\xfc\x62\x7c\x8f\x99\xe7\x5d\x08\x18\x11\x66\xe4\x1a\x45\x57\xd4\x15\x26\x69\x78\x3c\x25\x4d\xe5\xb1\x8e\x61\xd7\x12\x55\x39\x22\x1e\x8b\xec\x49\x53\x85\x9c\x93\xbe\xbf\xa0\xbb\x68\x49\xcd\x09\xd5\xad\xb2\x91\x67\x57\xa6\xa4\xca\x53\xad\xf1\xb6\xf8\x84\x5f\xc4\x16\xff\x4a\x0a\xb7\x4b\xe9\x99\x56\x7c\x4b\xd9\x94\xf6\xa2\x19\xe7\x7b\xda\x42\x58\x09\x96\x73\x3e\xf3\x92\xa6\x9e\x53\x76\x92\xf9\xe8\x82\x1d\x76\xf8\xea\xc2\xfc\x56\x71\x07\xcf\xd2\x3d\x82\x74\x27\xeb\x8b\xf5\x34\xe5\xb3\xa9\xea\xe3\x81\x1e\x4d\xf3\xb5\x51\xea\x93\x29\x7e\xe7\x08\x4f\xba\xfc\x7e\x8a\x99\x86\xc2\xb8\x28\x4b\xaa\xda\xf7\xe4\x1f\x63\xf6\xa7\x96\x5f\x3b\x0c\x16\xf2\x59\x00\x6f\xd1\x8d\x2e\xd3\xfe\x7c\x0a\x7f\x53\xf6\x44\x85\x64\xdf\x9b\x69\xd2\xe9\x21\x2c\x2f\x3d\x0b\x0b\x8d\xfe\x17\x8a\x7b\xfa\x9c\x36\x7f\xc0\x71\x4c\xa2\x37\x28\xa4\xdf\x62\xcf\xc9\x92\x47\xcd\x7f\
xed\xd0\xbe\xfc\x0f\x94\x1c\x8e\xe8\xe2\x1c\x67\x76\x74\xbb\xf1\x5b\x0e\xfd\x41\xe8\x47\x16\xa5\xd9\xb5\xb4\xad\x3b\x08\xec\xfc\xaa\xbe\x7a\x57\xfd\x00\xe8\xe8\x85\x03\x32\x8e\xee\xe4\xaa\xfa\x9f\x9c\x73\xc5\x4e\xde\x50\xd1\xef\xc1\xbf\x8a\xf4\x58\xa3\x37\x9d\x6b\x3a\x37\x67\xfe\x6f\xd1\xfe\xdf\xc7\xf4\xf5\x9f\x00\x00\x00\xff\xff\x16\x21\x27\x32\x67\x09\x00\x00") + +func templateExampleTmplBytes() ([]byte, error) { + return bindataRead( + _templateExampleTmpl, + "template/example.tmpl", + ) +} + +func templateExampleTmpl() (*asset, error) { + bytes, err := templateExampleTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/example.tmpl", size: 2407, mode: os.FileMode(420), modTime: time.Unix(1560441183, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateHeaderTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xaa\xae\x56\x48\x49\x4d\xcb\xcc\x4b\x55\x50\xca\x48\x4d\x4c\x49\x2d\x52\x52\xa8\xad\xe5\xd2\xd7\x57\x70\xce\x4f\x49\x55\x48\x4f\xcd\x4b\x2d\x4a\x2c\x49\x4d\x51\xd0\x70\x80\xb3\x35\x15\x92\x2a\x15\x52\xf3\x4a\x92\x75\x14\x5c\xfc\x15\xfc\xfc\x43\x14\x5c\x5d\x3c\x43\xf4\xb8\xb8\x0a\x12\x93\xb3\x13\xd3\x53\x15\xaa\xab\x15\x54\x40\xa6\x54\x57\x2b\xa4\xe6\xa5\x28\xd4\xd6\x02\x02\x00\x00\xff\xff\xa2\xeb\x84\xa5\x65\x00\x00\x00") + +func templateHeaderTmplBytes() ([]byte, error) { + return bindataRead( + _templateHeaderTmpl, + "template/header.tmpl", + ) +} + +func templateHeaderTmpl() (*asset, error) { + bytes, err := templateHeaderTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/header.tmpl", size: 101, mode: os.FileMode(420), modTime: time.Unix(1554708928, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateImportTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\x41\x6e\xeb\x20\x10\x86\xd7\x70\x8a\x11\xca\x22\xc9\xc2\x1c\xe2\x5d\xe0\x49\x3d\x40\x44\xf0\x18\x8f\x0a\x83\x03\xa8\xad\x84\xb8\x7b\x85\xe3\x34\x8b\xa6\xf2\xee\xe3\xe7\x63\x34\xfc\xb5\xc2\x88\x13\x31\x82\xa2\xb0\xc4\x54\x14\xb4\x26\xef\x08\x47\x29\xd4\x14\x8a\x92\x42\xd9\xc8\x05\xbf\x56\xc4\x94\x62\xca\x9d\x72\x49\x36\xf2\xc7\x86\xc4\x6e\x4d\x0b\x05\x54\x52\x8a\x5a\xf5\x19\xc8\x71\x4c\x08\x0e\x19\x53\x21\x76\x10\x19\x5c\x32\xcb\x0c\x05\xc3\xe2\x4d\xc1\x0c\x67\xdd\x5a\xd7\x81\x26\xe0\x58\xe0\x88\x37\x38\x0c\xff\x22\x4f\xe4\x86\xff\xc6\xbe\x1b\x87\x70\x78\xd0\xa9\x2f\x28\x84\xaa\xf5\xb7\xd4\x9a\x5e\xe3\xe7\x59\x49\xd1\x27\x7f\x52\x99\x9f\xfa\x9b\x9d\x31\x18\x80\xd6\xa0\x8f\x19\xba\x07\xb5\x02\xf2\x08\xdb\x2a\x1b\xf6\x02\xae\x56\x23\xaf\x3f\xdf\x50\x8f\x64\x3c\xda\x57\x91\xce\x37\xaf\x1e\xaf\x3c\x5d\xb5\x8b\xda\x25\x0c\x9e\x58\xbd\x4c\xf5\x5a\x86\x1e\xb3\xdf\xbb\xd7\x97\xcb\xae\xe2\x76\x8d\xe5\x0f\x03\xd9\xc6\x91\xd8\xdd\xd5\x1c\x59\xc9\x93\xfc\xe9\xe1\x3b\x00\x00\xff\xff\xbe\x62\x2e\x09\x26\x02\x00\x00") + +func templateImportTmplBytes() ([]byte, error) { + return bindataRead( + _templateImportTmpl, + "template/import.tmpl", + ) +} + +func templateImportTmpl() (*asset, error) { + bytes, err := templateImportTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/import.tmpl", size: 550, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateMetaTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x56\x4f\x6f\xe3\xb6\x13\x3d\xcb\x9f\x62\x7e\xfa\x69\x01\x1b\xb0\xe5\xdd\x1c\x53\xf8\xb4\xdb\xa2\x41\x37\x8b\x6d\x37\xe8\x25\x08\x16\xb4\x38\x8a\xb9\xa1\x29\x97\xa4\x9d\x18\xaa\xbe\x7b\xc1\xbf\xa2\x1c\xd9\x41\x81\x9e\x6c\x91\x9c\x99\xf7\xe6\xcd\x0c\xd9\xb6\x40\xb1\x66\x02\x21\xdf\xa2\x26\x39\x74\xdd\x64\xd2\xb6\xa0\x71\xbb\xe3\x44\x23\xe4\x1b\x24\x14\x65\x0e\x45\xf9\x95\x54\x4f\xe4\x11\x5f\x1f\x61\xdb\x5d\x23\x75\x0e\x85\xdd\xaa\x1a\xa1\x34\x4c\x27\xd9\x72\x09\x9f\xc9\x1a\x39\x6c\x1a\x4e\x15\xe8\x0d\x82\xd2\x92\x89\x47\xe0\x76\x99\xa2\x68\xb4\xf9\x34\x3b\x6d\x0b\xbc\x79\x46\x09\x45\xf9\x85\x6c\x4d\x10\xd0\xc7\x1d\x02\x13\x76\x9b\x12\x4d\xd6\x44\x61\x39\xc9\x9c\xcf\x15\xe4\x6d\x0b\x45\xe9\xbe\xba\x2e\x9f\x64\x6d\x0b\x92\x88\x47\x84\xe2\xfb\x1c\x0a\x84\xeb\x15\x14\xe5\xcf\xf4\x11\x15\x74\x9d\x39\xec\xc2\x9a\x65\x2c\x3f\x1a\x94\x44\x68\x58\x74\xdd\x24\x33\xb6\xac\x36\xeb\x37\xea\x46\x1c\x50\x2a\xb4\x36\x8b\x60\x64\x6d\xfc\xce\x89\xa9\xe1\xd9\x7b\xef\xba\x7f\x49\x17\x23\x5f\xe6\x03\x23\x7d\xc4\x73\xe4\x0d\x50\xe4\x0a\xff\xeb\xd8\x6f\xc6\x14\xb4\xcf\x54\x1f\xce\xab\x80\x27\x32\xc4\xd3\x43\x49\x6a\x27\xc9\x2f\x0c\x0d\xc8\x85\x13\xa5\x36\x5f\x76\xa3\x3e\x15\xc5\x93\x73\x27\xc6\xc8\x9d\xa1\x55\x47\x5a\x07\x94\x1a\x5f\x60\x27\x9b\x1d\x4a\x7d\x3c\x43\xae\x8f\xe0\xf8\x28\x41\x9e\x30\x71\x93\xf7\x05\x52\x97\xbf\x12\xf5\x09\x6b\xb2\xe7\x1a\x7c\x3e\x16\x50\x50\xbf\xe2\x68\xf8\xfd\x71\x36\x34\x1a\x27\x7c\xc2\xe2\x81\xf0\x3d\x42\xdd\xc8\xc0\x28\xe1\x62\x41\x06\xc4\x89\x97\x88\xec\xee\xb8\xc3\xf2\xcb\x7e\x8b\x92\x55\x7e\xc3\xaf\xfa\x2f\x23\x8b\x25\xd9\xb6\xb0\x93\x4c\xe8\x1a\xf2\x77\xff\x3f\xe4\x09\xe6\xc0\x69\x20\x61\xfc\xef\x29\x94\x37\x9f\x7a\xa9\xde\xd4\x85\x51\x07\x7d\x24\xf9\x63\xce\x06\x12\x98\xcd\x5e\x84\xe5\x12\xee\xc8\x9a\x63\x12\x50\xdb\x6f\x61\x8e\x34\xf5\xb9\x41\xf2\x3a\xb0\x73\x13\x66\x88\xfb\xf2\xc5\xbb\xb8\x30\x44\x7a\x15\xd1\x19\xa5\xc0\x59\x8a\xc8\xfc\xeb\x51\x3a\x8b\x00\x47\x22\x27\x9a\x35\x62\x69\x9a\xae\xf4\x25
\xe4\xa6\xcf\xed\xd5\xad\x39\x71\xb7\x41\x23\xd0\x96\xc8\x23\x3c\xe1\x11\x28\x56\x9c\x48\xa4\xb0\x46\xde\x3c\x97\x51\x4a\x5f\x0c\x23\x60\x62\x63\xfe\x81\x3c\xe5\x17\x62\xe1\x5f\x91\xb7\x31\x37\x95\xe3\xbe\xfe\x06\xd1\xf8\x2a\xe8\xb9\xfa\xc1\x77\x99\xb2\x15\x21\x2d\x5d\xef\x37\x8e\x18\xa1\x99\x3e\x96\xde\xf1\x8d\x06\x7c\x61\x4a\x2b\xa7\x0e\x53\xb0\xf3\xd7\x0b\x13\xd0\x48\x8a\x12\x74\x03\xe4\xd0\x30\x0a\x15\x93\xd5\x9e\x13\x09\x14\x77\x28\x28\x8a\xea\x08\xcf\x4c\x6f\x6c\xa4\x3c\x09\xd5\xdf\x50\x79\x70\x67\xe3\x5d\x66\xb1\x1a\xf8\x38\x4d\x56\x92\x69\x9b\x39\x93\x9e\xa8\xd4\x20\x4b\x1f\x1b\xbe\xdf\x8a\xb3\xf9\xa9\xec\xf6\xab\x99\x75\xa9\x30\xb2\x73\x8e\x07\xf2\xba\xed\x11\xc8\xc9\xff\xd9\x64\xb2\x5c\x82\x3b\xa9\x7c\x65\x12\xce\xe1\xdb\xef\x9f\x3d\x30\x05\x44\x8e\x76\x8f\x6d\x5e\x55\x4e\x0e\x44\x46\x07\x2b\xb8\x7f\x70\xbd\xde\x8e\x76\xf1\xfc\x55\x27\x0d\x67\x7f\x2c\xdd\x7a\xc4\xca\x43\x76\xaf\x0c\xab\x73\x61\xc6\x5a\xc8\xb8\xc1\x31\xf5\x4c\x2f\xb6\xea\xab\xbe\x32\x6b\xbd\x5c\x5f\x7f\x4b\x33\x4a\x04\x3d\x27\xe3\x95\xcd\xcc\xa9\x90\x6a\xa0\x64\x70\x9d\x76\xed\xb0\x17\x4e\x45\x86\xe9\xed\xd5\xed\xcc\xaa\x9c\x8d\x01\x4a\x52\x6c\xb4\x66\x82\xe2\xcb\x50\x71\x05\xef\x8d\xe8\x73\x38\xbb\xff\xc1\xec\xc7\x54\x0c\x2a\x39\x7c\xcc\x26\xfd\x34\x99\xf8\xab\xc4\xdc\x71\x7f\x12\xce\x28\xd1\x8d\xb4\xc9\xf4\x39\x77\xa5\xe0\x2e\x10\x33\x49\xa1\x28\xbf\x55\x1b\xdc\x12\xe8\xba\xd2\x96\x81\x67\xd9\x76\x5e\xe9\xe9\x2c\x7d\x04\xb0\xb1\x47\x80\xab\x04\xa7\x73\x5d\x26\x71\x17\x41\x45\x28\xec\x6c\x71\x97\x6b\x3c\x90\xee\xdb\x97\xcb\xf5\x0a\xe2\xb5\x56\xef\x45\x05\xd3\x77\x6a\x06\x28\x65\x23\xf3\x78\x13\x0e\xdf\x4d\x22\xdc\x10\x0a\x88\xb9\x7c\xbd\xe7\xa0\x5c\x3e\xb8\x81\x73\x7f\x05\x9b\xd9\xc5\x14\x54\x84\x73\x33\x92\x8f\xf6\xe8\x7a\xcf\x38\x45\xa9\x60\x8d\x75\x23\x11\x14\x39\xc4\x16\xf6\xf3\x76\x40\xee\x43\x40\x92\xa5\x38\x56\xbe\xdb\xee\xcd\x22\x83\xae\x7b\x48\x6c\xa6\xb3\xfb\xf7\x0f\xe5\x34\xf2\xed\xba\x99\x0f\x90\xbe\x08\x4f\xfd\xed\x45\x35\x9d\x41\x62\x04\xad\x3d\x96\x1d\x7a\x30\xd7\x6f\xc4\x75
\x06\xb5\xb0\x27\xef\xcb\xb2\x7c\xb0\x6e\x07\x0f\x0c\x9f\xe9\xe0\x3d\x6d\xcf\x1f\x73\x28\x84\x31\x7d\xf1\x0b\x83\x4c\x78\xd8\x03\x44\x16\xc7\x0f\x8b\x63\x7a\x36\xd4\x6c\x9e\x84\x8a\xc5\x9d\x65\x99\xff\x91\xa8\xf7\x52\x40\x62\x1f\x7a\xf0\x22\x70\x23\xfe\xf7\x39\xd4\x16\xb1\x03\x6c\x98\x87\xed\xcc\xa8\x29\xa5\xcd\x99\x18\xfa\x9d\xfd\x64\x77\xfe\xb7\x02\xc1\x78\x6f\x10\x80\xa0\x94\x61\x29\x50\x0e\xbf\xfe\x84\x60\x3c\x65\xd0\x4d\xa3\xc0\xe9\xf3\x7b\xec\x69\x76\xda\xc6\xee\xef\x3f\x01\x00\x00\xff\xff\x07\xac\x00\x70\xde\x0d\x00\x00") + +func templateMetaTmplBytes() ([]byte, error) { + return bindataRead( + _templateMetaTmpl, + "template/meta.tmpl", + ) +} + +func templateMetaTmpl() (*asset, error) { + bytes, err := templateMetaTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/meta.tmpl", size: 3550, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateMigrateMigrateTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x56\x5d\x4f\xeb\x38\x10\x7d\x4e\x7e\xc5\x28\xea\x22\x40\x25\x11\x3c\x46\xea\xc3\x0a\xb4\xd2\x0a\x89\x5d\x2d\xf0\x84\xd0\xca\x24\xe3\xd6\xaa\x63\x07\xc7\x65\xd5\xcd\xcd\x7f\xbf\xb2\x9d\x0f\x27\x6d\x69\xb9\xf7\x3e\xd5\x1e\xcf\x99\xf1\x9c\x39\x1d\xa7\xae\x21\x47\xca\x04\x42\x54\xb0\xa5\x22\x1a\x23\x68\x9a\xb0\xae\x41\x63\x51\x72\xa2\x11\xa2\x15\x92\x1c\x55\x34\xf6\x08\x59\x51\x4a\xa5\xe1\x3c\x0c\x22\xfa\x96\x25\x28\x74\x42\x19\xf2\x3c\xf2\x0c\x39\x23\x1c\x33\x9d\x54\xef\x3c\xa9\xb2\x15\x16\x24\x0a\x2f\xc2\xf0\x83\x28\x83\x13\x1b\xce\xc9\x1b\x47\x58\x80\x56\x1b\x0c\x83\xba\xbe\x02\x45\xc4\x12\x61\xf6\xef\x1c\x66\x1a\xd2\x05\xcc\xe2\x27\xe3\x53\x99\x9c\x81\xf5\x98\x65\x92\x6f\x0a\x51\x99\xd3\x92\x54\x19\xe1\x30\xd3\xf1\x03\x29\x10\xbe\x41\xa9\x98\xd0\x14\xa2\xdf\xaa\x5b\xe7\x15\x39\x60\x92\x40\x5d\x0f\xd0\xa6\x81\x95\xe4\x79\x05\x7a\x85\xd0\x19\xa9\x54\x76\x1f\x19\xcf\x36\x62\xd3\x44\xa0\xcd\x05\x62\x9b\x7d\x14\x61\x01\x2f\xaf\x97\xae\xac\xd8\x65\xab\xc3\x20\x98\x56\x91\xd9\x2a\x74\xeb\xd1\xd6\x11\x04\x35\x98\xf8\xa9\x4b\x96\xf5\xc9\xe6\xf0\xb4\x2d\x31\x05\x4b\x65\xec\xce\x8c\x25\xbe\x95\xa2\xd2\xad\xd7\xdc\x45\xa8\xaf\x80\x51\xe3\xf0\x2c\xd8\xfb\xc6\x1c\x80\x5b\xa5\x96\xcf\x79\x5d\x03\x8a\xbc\x4f\xd8\xbb\xff\x29\x32\x85\x05\x0a\x6d\x10\xfd\xe6\x08\xe8\xa1\x6b\x56\xd3\x40\xb7\x4e\xe1\xac\xeb\xe1\x1e\xdc\x7f\x4c\xaf\x0c\xf2\x91\xfd\x6f\x51\xe6\x37\x35\x5d\x88\x4d\x09\x87\xfd\x7f\xd7\x5a\x19\x7f\xf3\xeb\xf8\x89\x2d\x33\x87\x11\x77\x48\xc9\x86\xdb\x72\xda\xe5\x5e\x1c\x38\xe2\x0c\xb2\x0f\xd4\x8b\xca\x36\xf9\x88\xa4\xac\x12\xc7\x82\xd2\x1d\x27\x83\x9c\x9c\x22\x80\x09\x2a\x55\x41\x34\x93\xe2\x34\x65\xf5\xa1\x16\x70\xd6\xaa\xca\x26\xb4\xa2\xf2\xc4\x32\xe0\x6d\x39\xad\xae\xd2\x89\xbe\xed\xd9\xdf\x8a\x15\x44\x6d\xef\x71\x9b\xee\xd7\xea\x54\xac\xe5\xba\x55\xeb\x80\xec\x08\xf7\x5d\xd9\x61\x5d\xf7\x9a\xc1\x77\x13\xce\xdd\x75\x10\xf8\xf8\x92\x2f\x66\xcb\xa0\x69\x5e\x27\xdd\x1d\x37\x69\xba\x75\xc5\xfd\x21\x15\xb2\xa5\xb8\xc7\x6d\xe5
\x57\x37\x98\xf7\x56\x48\xbb\x0a\x3d\xf8\x90\xb5\x2d\xe1\x71\x5b\xbc\x49\xde\xf2\x4d\xd7\xb1\xdb\xf7\x94\xfb\xac\xef\xa7\x35\x00\xd8\x1d\x04\xd7\x36\x33\x5d\xef\x52\xb6\x4b\xee\xcd\x21\x76\xc7\x04\x67\xd7\x1d\xc1\x37\x5f\x65\x78\x97\xe4\x7d\x96\x66\xde\x77\x35\xb9\x84\x52\x56\xba\x94\x02\x41\x21\x55\x28\x32\x26\x96\xa0\x25\x90\x0f\xc9\x72\xd0\xdb\x12\xb3\x15\x66\x6b\x63\xe5\x52\x96\x15\x5c\x26\x7d\xa0\x7f\x90\xfe\x14\x67\x03\xfe\x38\x6d\xce\xdd\xfe\x79\x7e\x8c\xc0\x6e\x06\xf8\x81\x3e\x7b\x60\x7e\x21\xcb\xdd\x54\xa3\xeb\xf8\x2f\xf1\x5c\xe6\x44\x8f\x67\x7f\x17\xa3\x3b\x4c\xdb\x79\x13\x77\x83\x35\x3c\x90\x63\x12\xfa\x0e\x39\x1e\x0c\xed\x0e\x4f\x0d\xed\xbd\x47\x93\xff\x68\x13\x8e\xac\x49\x02\xed\x63\xee\xa6\x25\xe1\xdc\x8e\x45\xed\x8c\x4c\x78\xf3\x33\x0e\x83\xd6\xd7\x7f\x66\xfb\x81\x78\xfc\x53\x21\xf0\xfa\xf8\xd9\x2c\x9f\x87\xe3\xab\x37\xe6\x0b\x85\x6e\x44\x06\x4c\x30\x7d\x7e\x01\xf5\xc9\x1f\x26\x5f\x7d\x43\x26\xea\xfd\x64\x34\xf9\xef\x83\x7f\x3c\x08\xaf\x17\x2a\x2c\xe0\x54\x05\x4f\xef\xd2\x51\xe0\xad\x9b\x30\xec\x15\xfd\x3d\x00\x00\xff\xff\xb9\x20\xd3\x30\x2c\x0a\x00\x00") + +func templateMigrateMigrateTmplBytes() ([]byte, error) { + return bindataRead( + _templateMigrateMigrateTmpl, + "template/migrate/migrate.tmpl", + ) +} + +func templateMigrateMigrateTmpl() (*asset, error) { + bytes, err := templateMigrateMigrateTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/migrate/migrate.tmpl", size: 2604, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateMigrateSchemaTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x53\xc1\x8a\xdb\x30\x10\x3d\x5b\x5f\x31\x88\x65\xb1\x97\x20\xdf\x03\x39\x94\xdd\x1e\x0a\x69\xd9\x90\xfe\x80\x22\x8d\x6d\xb1\xb6\xe4\x4a\x93\x64\x17\xe3\x7f\x2f\xb2\xe4\x86\x84\x9e\x84\x34\x6f\xde\xbc\x37\xa3\x99\x26\xd0\xd8\x18\x8b\xc0\x83\xea\x70\x90\x1c\xe6\x99\x4d\x13\x10\x0e\x63\x2f\x09\x81\x77\x28\x35\x7a\x0e\x7c\x30\xad\x97\x84\x2b\xe2\x69\xfc\x68\x61\xbb\x83\x93\x0c\x08\x4f\xe2\xd5\xd9\xc6\xb4\xe2\x5d\xaa\x0f\xd9\x62\xc4\x30\x66\x86\xd1\x79\x82\x92\x15\x5c\x39\x4b\xf8\x49\x9c\x15\xbc\x19\x88\xb3\x78\x9e\x54\x8d\x96\x6a\x6d\x64\x8f\x2a\x85\xee\x9f\xea\xf0\xa7\xaf\xb3\x2e\x56\x31\x56\xd7\x70\x3c\xec\xdf\x52\x10\xae\x5e\x8e\x01\xa8\x43\xc8\x70\xf1\xe6\xcd\x05\x3d\x5c\x0d\x75\x20\xb5\x36\x64\x9c\x95\x3d\x24\xe1\xc6\x59\x18\x90\x3a\xa7\x83\x60\xf4\x35\xe2\xc2\x95\x32\x8c\x25\xf4\x8d\x54\x08\x13\x2b\x5e\x3d\x4a\xc2\x32\x2b\x8e\xc6\xe2\xb9\x01\x21\xc4\x4b\x12\x23\x7e\xcb\x53\x8f\x15\xa0\xf7\xce\xb3\x39\x09\x5b\x22\x60\x92\xa2\x6f\xef\x3f\xa0\x71\x1e\x54\xe4\x32\xb6\xdd\xac\x22\x6c\x0b\xd2\x6a\xd0\xde\x8d\xe3\x72\x81\x4c\x99\x25\x25\x96\x40\xfe\xac\x28\x8a\xd1\xfe\x72\xd3\x99\x4b\xfd\xc2\x6b\xc6\x2d\xf4\x18\x40\x82\xc5\x6b\x66\x02\xd5\x1b\xb4\x24\x58\x73\xb6\xea\x86\x2d\x23\xd3\x7d\xa3\x2a\x78\xc9\x3c\x13\x2b\x42\x9c\xe5\x73\xba\x4f\x33\x2b\xc2\xd5\x90\xea\x40\xfb\x8b\xc8\x0d\x2f\xab\x88\x53\x71\xdc\x2b\xcf\xcf\xaf\xe3\x61\xbf\x65\x45\x11\x44\xa4\xdf\xc1\x73\x76\xb3\x04\xa6\x54\x66\x1b\x49\xe6\x87\xcc\xe3\x61\x6f\x08\xff\x97\x9a\x22\x0f\xb9\x33\x2b\x3c\xd2\xd9\x5b\x08\xb9\x09\x69\x4a\xb7\x0e\xf4\xfd\xea\xdf\x63\x70\x67\xaf\x30\xe4\x16\x94\x61\xf5\x59\xc1\x3a\x5b\xfa\x84\x87\xf9\xe6\x69\x46\x8b\xa6\x81\x2c\x6a\x07\xd6\xf4\xf1\x69\xad\xde\x0c\x24\xbe\x47\x5c\x53\xf2\x75\x07\xe6\xb9\xce\xab\xb1\x5d\xed\x81\x76\x18\xc0\x3a\x82\x70\x1e\x97\x1d\xf8\xf7\x07\x79\x75\xe7\x26\xd6\x11\x37\x55\x1b\x58\xbe\x56\x10\x42\x54\x6c\x59\x33\xb4\x1a\xe6\xf9\x6f\x00\x00\x00\xff\xff\xe1\x18\xb7\x71\xa8\x03\x00\x00") + +func 
templateMigrateSchemaTmplBytes() ([]byte, error) { + return bindataRead( + _templateMigrateSchemaTmpl, + "template/migrate/schema.tmpl", + ) +} + +func templateMigrateSchemaTmpl() (*asset, error) { + bytes, err := templateMigrateSchemaTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/migrate/schema.tmpl", size: 936, mode: os.FileMode(420), modTime: time.Unix(1559129106, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _templateTxTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x4d\x8f\xdb\x36\x10\x3d\x4b\xbf\x62\x6a\x2c\x0a\x7b\xa1\x48\x3d\x1b\xd8\xd3\xa6\x87\x02\xed\xa2\x6d\x1c\x34\xb7\x84\x26\xc7\x16\x11\x89\x74\x29\xca\xa6\x21\xf8\xbf\x17\x33\xd4\x97\xd7\xde\xf4\xe3\xd2\x4b\xd6\x22\x67\x86\x6f\x66\xde\x3c\x32\x5d\x07\x0a\x77\xda\x20\x2c\x7c\x58\xc0\xe5\x92\xa6\x5d\x07\x0f\x87\xaf\x7b\x58\x3f\xc1\x56\x34\x08\x0f\xf9\xb3\x35\x3b\xbd\xcf\x7f\x15\xf2\xab\xd8\x23\x19\x75\x1d\x78\xac\x0f\x95\xf0\x08\x8b\x12\x85\x42\xb7\x88\x5e\x14\x41\xd7\x07\xeb\x3c\x2c\xd3\x64\x21\xad\xf1\x18\xfc\x22\x4d\x16\xcd\xd9\xc8\x45\x9a\x26\x8b\xdd\x56\x16\x68\x7c\xa1\xb4\xa8\x50\xfa\x45\xba\x4a\xd3\xa2\x80\x4d\x00\xdd\x80\x00\xef\x84\x69\x84\xf4\xda\x1a\x51\x81\xac\x34\x1a\x0f\xbe\x14\x9e\xb6\xa5\x43\xe1\x51\xc1\xf6\x0c\x52\x54\x95\x36\x7b\x78\x66\x8b\x7c\x13\x96\xab\x3c\xf5\xe7\x03\x52\xa4\xc6\xbb\x56\x7a\xe8\xd2\x44\x32\xf8\x34\xe9\x3a\x70\xc2\xec\x11\x1e\x3e\x67\xf0\x60\x28\xbd\x87\xfc\xc5\x2a\x6c\xe0\xdd\xe5\x92\x26\x49\x51\x00\xa5\x6e\xf2\x17\x51\x53\x92\x74\x9c\x2f\x71\x40\xb0\xb3\x0e\xb4\xf1\xe8\x08\x9a\xd9\xc3\x49\xfb\x92\xf7\xaf\x9d\xb6\xad\xae\x14\xba\x26\x4f\x93\xe4\x7a\xe7\xf1\xea\x33\xa2\x66\x58\x68\x14\x95\xed\xc2\x55\x78\xb6\x75\xad\x3d\x48\xfe\x13\x01\xcc\x0a\x92\xa7\xbb\xd6\x48\x58\xfa\x00\x8f\x9b\xb0\xea\xad\x97\x2b\x40\xe7\xac\xa3\x74\x1d\xfa\xd6\x19\xf0\x21\x8f\x89\xe7\xca\xe9\x23\xba\x7c\xf9\xe8\xc3\x7b\xfe\xb9\xca\x7d\xc8\x07\xc7\xfe\xd4\xdf\x6d\x55\x6d\x85\xfc\x0a\xae\xff\xf1\xb7\x27\x0f\
x1e\xff\xe1\xec\xc9\x75\xc8\x39\x56\x38\xba\x13\x05\x9e\x67\x4d\xdf\x6a\xa3\x1a\xf0\x16\x64\xeb\x1c\xaf\x7e\xa3\x1c\xec\xb7\x5c\xc1\x63\x1f\x61\x02\xf5\x7d\x5c\xe9\xd2\xa4\x67\xc4\x7a\xc2\x99\xa5\x49\xf2\x41\x96\x58\x8b\x35\xd4\x7a\xef\x84\xc7\xfc\x05\x4f\x71\x69\xe9\x43\x9f\xc7\x2a\x8b\x3d\xfd\x36\x8d\xae\xbb\xbe\x86\x17\x3c\xdd\x69\xfc\x72\x3c\x7c\x88\x4a\x2c\xe0\x08\xcc\x84\xae\x2b\x1e\x61\xa7\x5d\xe3\xc1\x58\x85\xcc\x3e\x65\x25\x60\x10\xf5\xa1\x42\x78\x2c\x78\x0a\xdf\xc1\x43\x34\x5a\x3f\x81\x36\x0a\xc3\x08\xe6\x07\x9e\xc4\xa2\x80\xa1\xf4\x70\x72\xe2\x10\xdb\xba\xd7\x47\x34\xd0\x8f\x5f\xbe\x09\x91\xcb\x02\x8c\x3d\x8c\xab\xbd\x93\xa6\xd3\x6a\x34\x5e\xc4\x7a\xd3\x9c\x96\x08\x5a\xa1\xe0\xf9\xb0\xd0\xb4\x07\x9e\xf6\x59\x5b\x1a\x0e\x68\x5b\x0f\x42\x29\x9a\x15\x61\xce\x80\xc1\x3b\x01\x92\x92\xf1\x96\x61\x4c\xa3\x52\x14\xf0\x47\x89\x06\xc4\xb0\xc6\xc3\xcd\xe1\x7b\x0a\xd1\x74\x67\xa0\x3d\xec\xb1\x9f\x8a\x86\xca\x39\xcb\x41\x9b\xc6\x0b\x23\x31\x9f\x4d\x91\x30\x6a\xa2\xb6\x70\xc8\x19\x52\x29\x29\x00\x0f\x33\x49\xcc\x80\x83\xcd\x69\xa7\x6d\xd0\x41\xdd\x36\x9e\x61\x80\x35\x48\x31\xed\x8e\x36\x6b\xd0\x06\xac\x23\x8c\xc4\xc9\x78\x8e\x75\xe3\xe0\xdc\xce\x4d\x51\x90\xf7\x4f\x3b\x10\x20\x2b\xdb\xa0\x9a\x6f\x53\x11\xb1\xde\xa2\x52\xa8\x38\xb2\xc1\xfe\x20\xd8\xa3\x41\xc7\x62\x87\xc6\x6b\xaf\xb1\xc9\x46\x84\xbc\x72\xa6\xb8\xe2\x70\xa8\x34\xd2\xd0\xfc\xd9\xa2\x3b\x67\x9c\x5e\xcf\x92\x35\x6b\x13\x13\x64\x60\x5f\xfe\x1b\x59\x7d\xfa\xf4\x89\xca\x49\x91\xd8\x0b\x4e\xba\xaa\x60\x8b\x80\x01\x65\xeb\x51\x31\x71\x4a\x67\xdb\x7d\xd4\x38\xd5\x53\xa8\xd4\xb2\x1c\x35\xd8\x97\xc4\x80\xdb\x54\x5f\xac\xc7\x38\xbb\x6c\xd1\xfb\xea\x06\x1a\xb1\x8b\x4c\x96\xd6\x0c\xd3\xdc\x36\x62\x8f\x19\x94\xf6\x84\x47\x74\xdc\xe2\x1e\x44\x03\xd6\x54\x67\xae\x08\x83\xe4\x6c\x7d\xac\xaf\xae\xb1\xd7\xfa\x91\xde\x93\xe2\x33\xf6\x09\x33\x42\xe3\x85\x8b\x80\xaf\x5a\x03\x3b\x67\xeb\x3c\x4d\x94\x3b\xbe\xa2\x3d\xc7\x38\x38\xeb\x51\x0e\x1a\x1c\x60\x8b\x95\x3d\xb1\xcf\x1c\x7f\x04\xcb\xd9\x27\x75\x0b\x74\xc9\xe5\xbf\xb4\x1e\x43\
xc4\x11\x86\x5b\xa4\x35\x0a\x5d\x75\xa6\x69\xb8\x2a\x59\xe2\xc3\x8c\xc5\xbd\x20\x1a\x3c\x6d\x42\x5f\x67\x6a\xad\xc1\xd3\xab\x6b\xb1\x1f\x8b\xa8\x7f\x6c\xbe\x94\x3e\x40\x7f\xdd\xd2\x8d\x4d\x7f\x33\xb8\xcd\x6d\x05\x93\x1c\x67\x51\xbb\x57\x54\x35\x1f\xf8\x8b\x94\x44\xb9\x63\x1e\x03\xae\xd2\x44\xef\x78\xf9\xbb\x27\x30\xba\x22\xc3\x41\x51\x8d\xae\xd8\x83\x14\x6b\x54\xd9\x21\x72\xe7\x03\x09\x2c\x03\x58\xd3\x3f\x97\x8c\x1c\xfa\xfc\x36\x61\x14\xfb\xd7\x3d\x21\x95\x3a\xa0\x23\x4d\x1f\xf0\x7a\x0b\xe2\x68\xb5\x1a\xa6\xda\xba\x69\xa8\x59\x27\x28\x24\xf7\xe5\xee\x58\xe7\xf0\xa1\xb4\x6d\xa5\x88\xdf\x64\x8e\x2a\x12\x6b\x7b\x7e\xc3\x7e\x76\xa9\x4c\x20\xa8\x1e\xd7\xc5\x5d\xc1\x72\x6a\xdc\x54\x49\x18\x6f\x41\xce\x18\x62\xc6\xef\xa3\xe5\x55\xda\xbd\xf7\x30\xef\xff\x94\xaf\xf7\xd0\xf5\xe1\x97\x2b\x1a\x03\xe2\xd8\x0c\x46\x4e\xed\x9c\x0c\x86\x2b\xd7\x36\x18\xdf\x5b\xa4\x89\x2c\x4c\x43\xe8\x59\x5c\x36\x9b\xae\x78\x98\x5a\x0f\x57\xcf\x95\x29\x50\xfc\x7e\x53\x63\x59\x9d\x3f\x5e\xeb\xeb\x97\xcd\xf0\x1e\xf9\x72\x4f\x5c\xdf\x78\x88\xcc\x51\xbe\x7a\x05\xdd\xc2\x1c\xf9\x32\x02\x1d\xf5\xfa\x5f\x43\x1d\x62\x5d\x83\x7d\x5b\xff\x6f\xe0\xde\x3e\x9d\x6e\x01\xff\x18\x50\x0e\x97\x60\xc8\xe9\xeb\x7e\xe3\x69\xe7\xfe\xe4\x47\x61\x8f\x74\xc8\x40\xb8\x7d\x13\x53\xdc\x09\x89\xdd\x25\x83\xe3\xfc\x73\xf6\x88\xf3\x21\xaf\xdb\xfc\x67\xcb\xef\xb3\x44\xe1\x8e\xf2\xe3\xb5\x8f\xa6\xea\x57\x27\x6e\xf5\xe0\x08\x42\x36\xdc\x40\x74\x56\x06\xc7\xe1\x75\xc7\x37\xce\x94\x0b\x7f\xde\x4f\x86\xb7\xfe\xe7\x6c\x46\x0c\x77\xd3\x39\x0a\x07\x9f\x5f\xbf\x91\x9e\xe6\xdd\x5d\x1a\x5d\xad\xf8\x3f\x51\xfd\xb3\xfe\xaf\x00\x00\x00\xff\xff\x08\xa2\xb4\xee\x62\x0d\x00\x00") + +func templateTxTmplBytes() ([]byte, error) { + return bindataRead( + _templateTxTmpl, + "template/tx.tmpl", + ) +} + +func templateTxTmpl() (*asset, error) { + bytes, err := templateTxTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/tx.tmpl", size: 3426, mode: os.FileMode(420), modTime: time.Unix(1560438408, 0)} + a := 
&asset{bytes: bytes, info: info} + return a, nil +} + +var _templateWhereTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x4f\x6f\xdb\x3a\x12\x3f\xcb\x9f\x62\x60\xb8\x78\x52\xe0\xd2\xad\xdb\x5e\x0a\xe4\xf0\xf6\x35\xdd\x7a\xb7\x6d\x9a\x4d\xd0\x77\x08\x82\x80\x91\x46\x31\x5b\x99\x52\x48\xda\x41\xa0\xfa\xbb\x2f\x48\x4a\xd4\x7f\x3b\xde\x36\x3d\x6d\x4e\x96\x38\x9c\x3f\xbf\x99\xf9\xcd\x28\x79\x0e\x11\xc6\x8c\x23\x8c\xef\x97\x28\x70\x0c\xdb\xed\x68\x94\xe7\xa0\x70\x95\x25\x54\x21\x8c\x97\x48\x23\x14\x63\x98\x90\x2f\x34\xfc\x4e\x6f\xb1\x2b\xc2\x56\x59\x2a\xd4\x18\x26\xe6\x68\x36\x83\xc5\x3b\x88\x59\xa2\x50\x48\xd8\xa0\x50\x2c\x44\x09\x37\x54\x62\x04\x29\x07\xb5\x44\x26\x80\x45\xc8\x15\x8b\x19\x0a\x32\x8a\xd7\x3c\x84\xc5\x3b\x9f\x45\x90\xe7\x30\x21\x8b\x77\xe4\xe2\x21\xd3\x86\x02\x40\xae\xc8\x17\x81\x11\x0b\xb5\xa9\x7c\xe4\x09\x54\x6b\xc1\x9b\xef\xf3\x91\xe7\x9d\x9f\x7d\x7c\x0b\x5a\x93\x2f\xe1\x48\xde\x25\xe4\x1c\x13\x0c\x55\x2a\x02\x7d\xcb\xf3\xf2\xfc\x39\xb0\xd8\x6a\x5f\xc8\x73\x25\x18\xbf\x85\xed\x96\x45\x53\xb8\x86\xb7\xc7\x20\x95\x08\x53\xbe\x21\x7f\xaa\x94\xf9\x2c\x0a\xb4\x3c\xf2\x48\x87\xe4\x79\x9e\x24\x7f\x6b\x7c\x7c\xad\xf8\xe4\xcc\x97\xe4\x2f\xbf\x74\xf5\xaf\x94\x4b\x45\xb9\xd2\xee\x4e\x81\x45\x41\x30\xf2\xbc\xed\x74\xe4\x79\xff\x14\xb8\x4a\x18\x2f\xdc\x52\x70\x14\xc9\x84\x5c\x08\xba\x41\x21\x69\x52\xf8\xa5\xc8\x07\x2a\x4d\xf0\xe5\xbd\xed\xc8\x02\x2c\x28\xbf\x45\x98\x5c\x4f\x61\x92\x66\xda\xc5\x34\x93\xc6\xa4\xf1\x49\x9b\x17\xfa\xed\x98\x45\x3a\x6d\x79\x6e\xc2\x4b\x33\xf2\x95\x0a\x46\x23\x16\xda\x97\x13\x01\x46\x46\x16\x42\x65\x4c\xfa\xc8\x00\xff\xf6\x18\x32\xc1\xb8\x8a\x61\xbc\x78\xf7\x4c\x8e\x8d\x8e\xcf\x74\x65\x12\xed\xcd\x66\xe0\x24\xb7\x5b\xa0\x59\x96\x30\x94\x3a\x89\xe6\x7d\x25\x0a\x99\xcb\x92\x4d\xb2\xad\x02\x4c\x22\x32\xf2\xcc\xf5\x9a\x1e\xdf\x3a\xb6\xdd\x42\xaf\xdb\x84\x10\xe7\xe9\xfe\x8a\x18\x2c\x89\xbd\x35\xe1\x8a\xa2\x69\xde\x1c\xe9\xc8\x59\x0c\x3c\x55\x40\xc5\xed\x7a\x85\x5c\x49\xb8\x47\x81\x90\x89\x74\xc3\x22\x8c\xa6\x1a\x0d\xed\xa4\x0e
\xf6\xfd\x9f\x1f\xcf\x4f\x20\x2c\x6a\x41\x4e\x9d\x0e\xc9\x78\x88\x70\x8f\x10\x52\xfe\x87\x32\x00\x3e\xc0\x78\xf1\x19\xfc\x60\x4c\xe0\x62\xc9\x24\xdc\xb3\x24\x81\x15\xfd\x8e\xa0\xf4\x63\x05\x64\x4c\x13\xf9\x40\xac\x2a\x16\x43\x82\xdc\x01\x17\xc0\xf1\x31\xbc\x28\xc2\x68\xd6\xe7\x7b\x9a\x48\xf4\x4d\x19\xea\x3f\x8b\x8e\x7d\x28\x42\xdb\xe8\xa4\x6b\x83\xfe\xe5\x15\xe3\x0a\x45\x4c\x43\xcc\xb7\xd3\xa6\x85\x42\x41\x9c\x0a\x60\xfa\x82\xad\xc7\x8d\xb3\x59\xa4\xae\xd5\x50\x9b\x4b\x76\xa5\x5b\xaa\xd5\x51\xa5\xd6\x4b\x76\x15\xe8\xdc\x26\x12\x0b\x61\x38\x86\xda\x61\xad\x42\x2b\x87\x4d\x2f\xea\x1b\x3d\xf6\x8a\x88\xfa\xba\xd8\x45\x52\x29\xa9\x54\xd7\x11\x6b\x56\xf2\x8e\xee\xde\xec\x2d\x57\x8b\x9a\x69\xff\xc7\xf5\xff\xce\x1a\xfc\x35\x89\xea\xa2\xdc\x01\xb7\xc2\xa5\xe4\xa3\xac\x44\xa5\x88\xa2\xe8\xda\xae\xa7\x9b\x22\x7c\x9b\xd1\xd2\x48\x1f\x20\x5b\xcd\x6d\x55\x82\x5b\x1c\x17\x6b\xd7\x27\xe4\xbd\xa6\x0c\xd9\xa5\x28\x2a\x43\x9a\xc0\x24\xde\x4b\x4d\x78\xb7\xa6\x09\x53\x0f\x10\x2e\x31\xfc\xde\xa5\xa5\x3c\x87\xbb\x75\xaa\xb0\xa6\xab\xe0\x29\x58\xa8\x3f\x64\x31\x99\xb4\x31\x95\xd6\x0d\x9c\x9c\xf5\x11\xd9\xc6\x3e\x3d\x0d\x3d\x0d\x4c\x9d\xb8\x5d\x95\x87\x17\x9d\xc9\xb2\xff\x91\xde\x60\x32\x85\x8e\xce\x29\x64\xda\xde\xe6\xe7\x72\xd7\x99\x5f\xbe\x19\x60\x71\xd0\x9a\x5f\x9b\xfd\xe3\x6b\xb3\x77\x7a\x81\xdf\xac\x90\x00\xfc\xb2\xa5\x83\x9f\x1d\x64\xc3\x15\xf3\xd3\x93\x6d\x67\xe1\x0c\x33\xc3\x01\xbc\x30\xc0\x0a\xfd\x9c\xb0\x1d\xb5\xf8\xe0\xff\xa3\xf5\xb0\xd1\x7a\xf8\x90\xe9\x34\xf3\x13\x8c\x98\xfd\xdd\xde\xe5\xfa\x83\xfc\x30\xec\xe0\x55\xfd\x39\x48\x14\x68\x89\xe2\x24\xba\xc5\x61\x8e\x47\x0b\xd2\x0f\xb7\x97\x7e\xa0\xf2\x99\x61\x00\x2b\x9f\xe8\x40\x8c\x22\xac\xc7\x61\x0f\x23\x26\x30\x54\x2c\xe5\x86\x5b\x4e\xd7\xaa\xba\xc7\xb8\x86\x05\xaf\x9b\x22\x0b\xee\x24\x74\xbc\x48\x16\x72\x61\x05\x6d\x16\xf3\xdc\xab\x29\xad\x5d\x18\xd0\x59\xb7\xea\xe5\xf9\xec\x08\xe8\x26\x65\x11\x84\x4c\x84\xeb\x84\x0a\x88\x50\x97\x36\x86\x9a\x7e\x8e\x66\x4e\x93\x8d\xca\x04\x55\xd8\x6f\xc7\x56\x16\xd6\x0e\x2a\xfb\x40\xa5\xc6\x76
\x17\x87\xa1\xe3\x30\x8c\x6e\xb1\x8f\xc2\x9e\x66\xc7\x46\xf2\x69\xfe\x09\x1c\x07\xa8\x97\x66\x5f\x23\x17\xf4\x26\x41\xbf\xe8\xa8\xb2\x5f\xca\xd6\xbb\x4b\xc8\x82\x97\x4f\x9e\x7a\x39\xb4\x98\x95\x12\x95\x7d\x23\x87\xe4\xcb\xbf\x6b\x72\x97\xbd\x29\x7e\x59\x2d\x2f\x2f\x1c\xc8\x57\x01\x79\x2f\xd2\x95\x69\x5c\xeb\xa1\xd5\x67\x7e\xd7\x4d\x3b\xdb\xe5\x8f\xa0\xb3\xb1\xa6\xc2\x06\x7f\x0a\x3e\xe5\x91\xfe\x7d\x3a\x3f\x6d\x78\x11\x98\x69\x30\x3b\x02\x2d\xf4\xe3\x07\xf8\x5a\xe0\x9e\xa9\x25\x14\xe5\x65\x52\x15\x94\xd5\xb2\x0f\x3d\xed\xf4\xe7\x54\x7d\x5e\x27\x89\xef\x30\xd3\xad\x92\xac\x57\xbc\xe1\x7c\xcb\xd9\xc2\x8b\xd3\xf9\xa7\xa6\x17\x54\xca\x34\x3c\xcc\x87\x5f\x96\xc1\xae\xd7\xa4\x94\xf5\x1e\x99\xa2\xea\x42\x17\x9f\x41\x68\x76\xe4\xd5\xf1\xfb\xa1\x6b\xfe\xec\x48\xd7\x03\xd3\x5b\x26\xe5\x06\x4f\x8b\xaf\xc4\x24\x7e\x2e\x30\x46\x81\x3c\xc4\x29\x28\x3b\xb0\x10\xd4\x7d\x5a\xfd\x07\xc5\x21\x5f\x75\xd4\x39\x26\xf1\x7f\x30\xae\x9a\x8a\xfc\x23\x55\x4b\xbf\xa2\x93\xc6\xc7\x4f\x52\x52\x9a\x91\x6c\x92\xe5\x76\x7b\xd2\xbc\xd6\x39\xff\xea\x0f\x42\xe0\xe8\xdf\x52\x48\xf5\x0f\x85\x67\xf2\x6f\xa6\x96\x63\x47\x2d\xbf\x96\xbb\x8a\xe2\x84\x5b\xb6\x41\xae\x17\x84\x88\x69\x5f\x25\xf8\xa9\x5a\xa2\xa8\x14\xc9\xa0\x8f\xe6\xf4\xb1\x04\x42\x48\x83\xd4\x7e\x0b\xf9\x99\xc9\xa1\xbb\xe7\x65\xb1\x07\x7e\xd3\x0f\x2f\xcc\xc3\xf3\x1e\x8e\xb2\xf2\xa5\x84\x16\x77\x57\x75\x2a\x9e\xef\x6c\x49\x35\x37\x2f\x5d\x9b\xb8\xcf\x77\x63\x89\x23\x4c\xec\x81\x69\x9e\x87\xac\xe8\xa0\x4a\xa9\x75\xd7\x4d\xa5\x76\x7f\xd5\xd4\x99\x0a\xab\xdd\x33\xa2\xf5\xf3\xba\xaf\xa5\x77\xaf\x9a\xde\x0d\x34\x71\x21\xfc\xba\x14\x2e\x38\x42\xbd\x72\xf4\xd6\xa1\xfa\xc9\x37\x43\xe4\xae\xfd\x0d\x5b\xa8\x57\xee\xf9\x5f\x29\xe3\xbe\x9a\xbb\xe7\x53\xbe\x5b\x1d\x33\xea\xa6\xa0\xe6\x4e\xc8\xc0\xd5\xa2\xb2\xd2\xd5\x37\x2d\x57\x8b\x89\xa2\xe6\xb5\xcf\xf5\xeb\x29\x64\xd5\x6e\x6e\x0b\xb2\xdc\x3b\x33\x72\x7e\xf6\xd1\x57\x6f\x82\xc6\xbf\x6e\xd4\x6b\xa3\xa6\x0c\xff\x4d\x0f\xf5\x2f\xb8\x3f\xcc\xb3\xa0\x5e\xb7\x59\xff\x77\x8e\xa8\xaa\x16\x1b\x24\xdf\x87\x63\x7b\x00
\xff\xbe\xb2\xed\x2f\xc1\xc1\x3a\x3e\x2c\x9f\xf3\x66\x3e\x87\x12\xd7\x37\x93\x74\xe9\x3d\xc1\xc8\x1e\xc8\x49\xcf\xe0\x7d\xe4\xbc\x7d\x0a\x3c\x7a\x0a\xb9\x81\xc5\xff\x3c\x93\xdb\x83\xd4\xc0\xa9\x07\xb2\xc0\x18\x56\x48\xb9\x04\xa6\x40\x2e\xd3\x75\x12\xc1\x0d\x82\x12\x6b\x04\xc6\x21\xe5\x08\x69\x6c\xe6\x93\x9b\x92\xb5\x19\xed\x31\x3e\x85\x74\xad\x74\xf4\xd7\xd7\x64\xc1\xbf\xfa\xc1\x54\xff\x3a\x5d\xab\x72\x92\x3e\x02\xa2\x22\x0e\x9f\xf1\xa0\xf3\x2e\x5d\xab\x16\x37\x34\x77\x2f\x6d\x4c\xb8\xd5\xcb\x9a\x6e\x0f\x79\x7b\x81\xf1\x6a\x01\x33\xce\xf6\x8b\x69\x83\x7b\xd6\x5d\x57\x68\xa2\x08\xbc\xff\x1b\xa9\xda\x26\x0e\xc0\x40\x89\x76\xb8\xfb\x56\x18\xeb\xb6\x12\x87\x2f\x33\xcd\x0f\xd8\xe2\xe7\x7f\x03\x00\x00\xff\xff\x97\x57\xb7\xea\x2e\x1b\x00\x00") + +func templateWhereTmplBytes() ([]byte, error) { + return bindataRead( + _templateWhereTmpl, + "template/where.tmpl", + ) +} + +func templateWhereTmpl() (*asset, error) { + bytes, err := templateWhereTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/where.tmpl", size: 6958, mode: os.FileMode(420), modTime: time.Unix(1560146912, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. 
+func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "template/base.tmpl": templateBaseTmpl, + "template/builder/create.tmpl": templateBuilderCreateTmpl, + "template/builder/delete.tmpl": templateBuilderDeleteTmpl, + "template/builder/query.tmpl": templateBuilderQueryTmpl, + "template/builder/setter.tmpl": templateBuilderSetterTmpl, + "template/builder/update.tmpl": templateBuilderUpdateTmpl, + "template/client.tmpl": templateClientTmpl, + "template/config.tmpl": templateConfigTmpl, + "template/dialect/gremlin/create.tmpl": templateDialectGremlinCreateTmpl, + "template/dialect/gremlin/delete.tmpl": templateDialectGremlinDeleteTmpl, + "template/dialect/gremlin/group.tmpl": templateDialectGremlinGroupTmpl, + "template/dialect/gremlin/query.tmpl": templateDialectGremlinQueryTmpl, + "template/dialect/gremlin/update.tmpl": templateDialectGremlinUpdateTmpl, + "template/dialect/sql/create.tmpl": templateDialectSqlCreateTmpl, + "template/dialect/sql/delete.tmpl": templateDialectSqlDeleteTmpl, + "template/dialect/sql/group.tmpl": 
templateDialectSqlGroupTmpl, + "template/dialect/sql/query.tmpl": templateDialectSqlQueryTmpl, + "template/dialect/sql/update.tmpl": templateDialectSqlUpdateTmpl, + "template/ent.tmpl": templateEntTmpl, + "template/example.tmpl": templateExampleTmpl, + "template/header.tmpl": templateHeaderTmpl, + "template/import.tmpl": templateImportTmpl, + "template/meta.tmpl": templateMetaTmpl, + "template/migrate/migrate.tmpl": templateMigrateMigrateTmpl, + "template/migrate/schema.tmpl": templateMigrateSchemaTmpl, + "template/tx.tmpl": templateTxTmpl, + "template/where.tmpl": templateWhereTmpl, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "template": &bintree{nil, map[string]*bintree{ + "base.tmpl": &bintree{templateBaseTmpl, map[string]*bintree{}}, + "builder": &bintree{nil, map[string]*bintree{ + "create.tmpl": &bintree{templateBuilderCreateTmpl, map[string]*bintree{}}, + "delete.tmpl": &bintree{templateBuilderDeleteTmpl, map[string]*bintree{}}, + "query.tmpl": &bintree{templateBuilderQueryTmpl, map[string]*bintree{}}, + "setter.tmpl": &bintree{templateBuilderSetterTmpl, map[string]*bintree{}}, + "update.tmpl": &bintree{templateBuilderUpdateTmpl, map[string]*bintree{}}, + }}, + "client.tmpl": &bintree{templateClientTmpl, map[string]*bintree{}}, + "config.tmpl": &bintree{templateConfigTmpl, map[string]*bintree{}}, + "dialect": &bintree{nil, map[string]*bintree{ + "gremlin": &bintree{nil, map[string]*bintree{ + "create.tmpl": &bintree{templateDialectGremlinCreateTmpl, map[string]*bintree{}}, + "delete.tmpl": &bintree{templateDialectGremlinDeleteTmpl, map[string]*bintree{}}, + "group.tmpl": &bintree{templateDialectGremlinGroupTmpl, map[string]*bintree{}}, + "query.tmpl": &bintree{templateDialectGremlinQueryTmpl, map[string]*bintree{}}, + "update.tmpl": &bintree{templateDialectGremlinUpdateTmpl, map[string]*bintree{}}, + }}, + "sql": &bintree{nil, map[string]*bintree{ + "create.tmpl": 
&bintree{templateDialectSqlCreateTmpl, map[string]*bintree{}}, + "delete.tmpl": &bintree{templateDialectSqlDeleteTmpl, map[string]*bintree{}}, + "group.tmpl": &bintree{templateDialectSqlGroupTmpl, map[string]*bintree{}}, + "query.tmpl": &bintree{templateDialectSqlQueryTmpl, map[string]*bintree{}}, + "update.tmpl": &bintree{templateDialectSqlUpdateTmpl, map[string]*bintree{}}, + }}, + }}, + "ent.tmpl": &bintree{templateEntTmpl, map[string]*bintree{}}, + "example.tmpl": &bintree{templateExampleTmpl, map[string]*bintree{}}, + "header.tmpl": &bintree{templateHeaderTmpl, map[string]*bintree{}}, + "import.tmpl": &bintree{templateImportTmpl, map[string]*bintree{}}, + "meta.tmpl": &bintree{templateMetaTmpl, map[string]*bintree{}}, + "migrate": &bintree{nil, map[string]*bintree{ + "migrate.tmpl": &bintree{templateMigrateMigrateTmpl, map[string]*bintree{}}, + "schema.tmpl": &bintree{templateMigrateSchemaTmpl, map[string]*bintree{}}, + }}, + "tx.tmpl": &bintree{templateTxTmpl, map[string]*bintree{}}, + "where.tmpl": &bintree{templateWhereTmpl, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + 
return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/entc/gen/func.go b/entc/gen/func.go new file mode 100644 index 000000000..7e28b7fb5 --- /dev/null +++ b/entc/gen/func.go @@ -0,0 +1,215 @@ +package gen + +import ( + "fmt" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "text/template" + "unicode" + + "fbc/ent/field" + + "github.com/go-openapi/inflect" +) + +var ( + rules = ruleset() + acronym = make(map[string]bool) + funcs = template.FuncMap{ + "ops": ops, + "add": add, + "order": order, + "snake": snake, + "pascal": pascal, + "extend": extend, + "xrange": xrange, + "receiver": receiver, + "plural": plural, + "aggregate": aggregate, + "primitives": primitives, + "singular": rules.Singularize, + "quote": strconv.Quote, + "base": filepath.Base, + "keys": keys, + "join": join, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "hasSuffix": strings.HasSuffix, + } +) + +// ops returns all operations for given type. +func ops(f *Field) []Op { + switch t := f.Type; { + case t == field.TypeBool: + return boolOps + case t == field.TypeString && strings.ToLower(f.Name) != "id": + return stringOps + default: + return numericOps + } +} + +// xrange generates a slice of len n. +func xrange(n int) (a []int) { + for i := 0; i < n; i++ { + a = append(a, i) + } + return +} + +// plural a name. +func plural(name string) string { + p := rules.Pluralize(name) + if p == name { + p += "Slice" + } + return p +} + +// pascal converts the given column name into a PascalCase. 
+// +// user_info => UserInfo +// full_name => FullName +// user_id => UserID +// +func pascal(s string) string { + words := strings.Split(s, "_") + for i, w := range words { + upper := strings.ToUpper(w) + if acronym[upper] { + words[i] = upper + } else { + words[i] = rules.Capitalize(w) + } + } + return strings.Join(words, "") +} + +// snake converts the given struct or field name into a snake_case. +// +// Username => username +// FullName => full_name +// HTTPCode => http_code +// +func snake(s string) string { + var b strings.Builder + for i := 0; i < len(s); i++ { + r := rune(s[i]) + // put '_' if it is not a start or end of a word, current letter is an uppercase letter, + // and previous letter is a lowercase letter (cases like: "UserInfo"), or next letter is + // also a lowercase letter and previous letter is not "_". + if i > 0 && i < len(s)-1 && unicode.IsUpper(r) && + (unicode.IsLower(rune(s[i-1])) || + unicode.IsLower(rune(s[i+1])) && unicode.IsLetter(rune(s[i-1]))) { + b.WriteString("_") + } + b.WriteRune(unicode.ToLower(r)) + } + return b.String() +} + +// receiver returns the receiver name of the given type. +// +// User => u +// UserQuery => uq +// +func receiver(s string) (r string) { + words := strings.Split(snake(s), "_") + for _, w := range words { + r += w[:1] + } + return +} + +// scope wraps the Type object with extended context. +type scope struct { + *Type + Scope map[interface{}]interface{} +} + +// extend extends the parent block with a KV pairs. +// +// {{ with $scope := extend $ "key" "value" }} +// {{ template "setters" $scope }} +// {{ end}} +// +func extend(t *Type, kv ...interface{}) *scope { + s := &scope{Type: t, Scope: make(map[interface{}]interface{})} + if len(kv)%2 != 0 { + panic("invalid number of parameters") + } + for i := 0; i < len(kv); i += 2 { + s.Scope[kv[i]] = kv[i+1] + } + return s +} + +// add calculates summarize list of variables. 
+func add(xs ...int) (n int) { + for _, x := range xs { + n += x + } + return +} + +func ruleset() *inflect.Ruleset { + rules := inflect.NewDefaultRuleset() + // add common initialisms. copied from golint. + for _, w := range []string{ + "API", "ASCII", "CPU", "CSS", "DNS", "GUID", "UID", "UI", + "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "HTML", + "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", + "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS", + } { + acronym[w] = true + rules.AddAcronym(w) + } + return rules +} + +// order returns a map of sort orders. +// The key is the function name, and the value its database keyword. +func order() map[string]string { + return map[string]string{ + "asc": "incr", + "desc": "decr", + } +} + +// aggregate returns a map between all agg-functions and if they accept a field name as a parameter or not. +func aggregate() map[string]bool { + return map[string]bool{ + "min": true, + "max": true, + "sum": true, + "mean": true, + "count": false, + } +} + +// keys returns the given map keys. +func keys(v reflect.Value) ([]string, error) { + if k := v.Type().Kind(); k != reflect.Map { + return nil, fmt.Errorf("expect map for keys, got: %v", k) + } + keys := make([]string, v.Len()) + for i, v := range v.MapKeys() { + keys[i] = v.String() + } + return keys, nil +} + +// primitives returns all primitives types. +func primitives() []string { + return []string{field.TypeString.String(), field.TypeInt.String(), field.TypeFloat64.String(), field.TypeBool.String()} +} + +// join is a wrapper around strings.Join to provide consistent output. 
+func join(a []string, sep string) string { + sort.Strings(a) + return strings.Join(a, sep) +} diff --git a/entc/gen/graph.go b/entc/gen/graph.go new file mode 100644 index 000000000..3ed7d4fd3 --- /dev/null +++ b/entc/gen/graph.go @@ -0,0 +1,392 @@ +package gen + +import ( + "bytes" + "fmt" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql/schema" + "fbc/ent/field" +) + +type ( + // Config for global generator configuration that similar for all nodes. + Config struct { + // Schema is the package path for the schema directory. + Schema string + // Target is the path for the directory that holding the generated code. + Target string + // Package name for the targeted directory that holds the generated code. + Package string + // Header is an optional header signature for generated files. + Header string + // imports are the import packages used for code generation. + imports map[string]string + } + // Graph holds the nodes/entities of the loaded graph schema. Note that, it doesn't + // hold the edges of the graph. Instead, each Type holds the edges for other Types. + Graph struct { + Config + // Nodes are list of Go types that mapped to the types in the loaded schema. + Nodes []*Type + // Schemas holds the raw interfaces for the loaded schemas. + Schemas []ent.Schema + } +) + +// NewGraph creates a new Graph for the code generation from the given schema definitions. +// It fails if one of the schemas is invalid. +func NewGraph(c Config, schemas ...ent.Schema) (g *Graph, err error) { + defer catch(&err) + c.imports = imports() + g = &Graph{c, make([]*Type, 0, len(schemas)), schemas} + for _, schema := range schemas { + g.addNode(schema) + } + for _, schema := range schemas { + g.addEdges(schema) + } + for _, t := range g.Nodes { + check(g.resolve(t), "resolve %q relations/references", t.Name) + } + return +} + +// Gen generates the artifacts for the graph. 
+func (g *Graph) Gen() (err error) { + defer catch(&err) + for _, n := range g.Nodes { + path := filepath.Join(g.Config.Target, n.Package()) + check(os.MkdirAll(path, os.ModePerm), "create dir %q", path) + for _, tmpl := range Templates { + b := bytes.NewBuffer(nil) + check(templates.ExecuteTemplate(b, tmpl.Name, n), "execute template %q", tmpl.Name) + target := filepath.Join(g.Config.Target, tmpl.Format(n)) + check(ioutil.WriteFile(target, b.Bytes(), 0644), "create file %q", target) + } + } + for _, tmpl := range GraphTemplates { + if dir := filepath.Dir(tmpl.Format); dir != "." { + path := filepath.Join(g.Config.Target, dir) + check(os.MkdirAll(path, os.ModePerm), "create dir %q", path) + } + b := bytes.NewBuffer(nil) + check(templates.ExecuteTemplate(b, tmpl.Name, g), "execute template %q", tmpl.Name) + target := filepath.Join(g.Config.Target, tmpl.Format) + check(ioutil.WriteFile(target, b.Bytes(), 0644), "create file %q", target) + } + return run(exec.Command("goimports", "-w", g.Config.Target)) +} + +// Describe writes a description of the graph to the given writer. +func (g *Graph) Describe(w io.Writer) { + for _, n := range g.Nodes { + n.Describe(w) + } +} + +// addNode creates a new Type/Node/Ent to the graph. +func (g *Graph) addNode(schema ent.Schema) { + t, err := NewType(g.Config, schema) + check(err, "create type") + g.Nodes = append(g.Nodes, t) +} + +// addEdges adds the node edges to the graph. +func (g *Graph) addEdges(schema ent.Schema) { + t, _ := g.typ(reflect.TypeOf(schema).Name()) + for _, e := range schema.Edges() { + typ, ok := g.typ(e.Type()) + expect(ok, "type %q does not exist for edge", e.Type()) + switch { + // assoc only. + case e.IsAssoc(): + t.Edges = append(t.Edges, &Edge{ + Type: typ, + Name: e.Name(), + Owner: t, + Unique: e.IsUnique(), + Optional: !e.IsRequired(), + }) + // inverse only. 
+ case e.IsInverse() && e.Assoc() == nil: + expect(!e.IsRequired(), "inverse edge can not be required: \"%s.%s\"", t.Name, e.Name()) + expect(e.RefName() != "", "missing reference name for inverse edge: \"%s.%s\"", t.Name, e.Name()) + t.Edges = append(t.Edges, &Edge{ + Type: typ, + Name: e.Name(), + Owner: typ, + Inverse: e.RefName(), + Unique: e.IsUnique(), + Optional: !e.IsRequired(), + }) + // inverse and assoc. + case e.IsInverse(): + ref := e.Assoc() + expect(e.RefName() == "", "reference name is derived from the assoc name: \"%s.%s\" <-> \"%s.%s\"", t.Name, ref.Name(), t.Name, e.Name()) + expect(ref.Type() == t.Name, "assoc-inverse edge allowed only as o2o relation of the same type") + t.Edges = append(t.Edges, &Edge{ + Type: typ, + Name: e.Name(), + Owner: t, + Inverse: ref.Name(), + Unique: e.IsUnique(), + Optional: !e.IsRequired(), + }, &Edge{ + Type: typ, + Owner: t, + Name: ref.Name(), + Unique: ref.IsUnique(), + Optional: !ref.IsRequired(), + }) + default: + panic(graphError{"edge must be either an assoc or inverse edge"}) + } + } +} + +// resolve resolves the type reference and relation of edges. +// It fails if one of the references is missing. +// +// relation definitions between A and B, where A is the owner of +// the edge and B uses this edge as a back-reference: +// +// O2O +// - A have a unique edge (E) to B, and B have a back-reference unique edge (E') for E. +// - A have a unique edge (E) to A. +// +// O2M (The "Many" side, keeps a reference to the "One" side). +// - A have an edge (E) to B (not unique), and B doesn't have a back-reference edge for E. +// - A have an edge (E) to B (not unique), and B have a back-reference unique edge (E') for E. +// +// M2O (The "One" side, keeps a reference to the "Many" side). +// - A have a unique edge (E) to B, and B doesn't have a back-reference edge for E. +// - A have a unique edge (E) to B, and B have a back-reference non-unique edge (E') for E. 
+// +// M2M +// - A have an edge (E) to B (not unique), and B have a back-reference non-unique edge (E') for E. +// - A have an edge (E) to A (not unique). +// +func (g *Graph) resolve(t *Type) error { + for _, e := range t.Edges { + switch { + case e.IsInverse(): + ref, ok := e.Type.HasAssoc(e.Inverse) + if !ok { + return fmt.Errorf("assoc is missing for inverse edge: \"%s.%s\"", e.Type.Name, e.Name) + } + table := t.Table() + // The name of the column is how we identify the other side. For example "A Parent has Children" + // (Parent <-O2M-> Children), or "A User has Pets" (User <-O2M-> Pet). The Children/Pet hold the + // relation, and they are identified the edge using how they call it in the inverse ("our parent") + // even though that struct is called "User". + column := snake(e.Name) + "_id" + switch a, b := ref.Unique, e.Unique; { + // If the relation column is in the inverse side/table. The rule is simple, if assoc is O2M, + // then inverse is M2O and the relation is in its table. + case a && b: + e.Rel.Type, ref.Rel.Type = O2O, O2O + case !a && b: + e.Rel.Type, ref.Rel.Type = M2O, O2M + + // if the relation column is in the assoc side. + case a && !b: + e.Rel.Type, ref.Rel.Type = O2M, M2O + table = e.Type.Table() + column = snake(ref.Name) + "_id" + + case !a && !b: + e.Rel.Type, ref.Rel.Type = M2M, M2M + table = e.Type.Label() + "_" + ref.Name + c1, c2 := ref.Owner.Label()+"_id", ref.Type.Label()+"_id" + // if the relation is from the same type: User has Friends ([]User). + // give the second column a different name (the relation name). + if c1 == c2 { + c2 = rules.Singularize(e.Name) + "_id" + } + e.Rel.Columns = append(e.Rel.Columns, c1, c2) + ref.Rel.Columns = append(ref.Rel.Columns, c1, c2) + } + e.Rel.Table, ref.Rel.Table = table, table + if !e.M2M() { + e.Rel.Columns = []string{column} + ref.Rel.Columns = []string{column} + } + // assoc with uninitialized relation. 
+ case !e.IsInverse() && e.Rel.Type == Unk: + switch { + case !e.Unique && e.Type == t: + e.Rel.Type = M2M + e.SelfRef = true + e.Rel.Table = t.Label() + "_" + e.Name + c1, c2 := e.Owner.Label()+"_id", rules.Singularize(e.Name)+"_id" + e.Rel.Columns = append(e.Rel.Columns, c1, c2) + case e.Unique && e.Type == t: + e.Rel.Type = O2O + e.SelfRef = true + e.Rel.Table = t.Table() + case e.Unique: + e.Rel.Type = M2O + e.Rel.Table = t.Table() + default: + e.Rel.Type = O2M + e.Rel.Table = e.Type.Table() + } + if !e.M2M() { + // Unlike assoc edges with inverse, we need to choose a unique name for the + // column in order to no conflict with other types that point to this type. + e.Rel.Columns = []string{fmt.Sprintf("%s_%s_id", t.Label(), snake(rules.Singularize(e.Name)))} + } + } + } + return nil +} + +// Tables returns the schema definitions of SQL tables for the graph. +func (g *Graph) Tables() (all []*schema.Table) { + nullable := true + tables := make(map[string]*schema.Table) + for _, n := range g.Nodes { + table := schema.NewTable(n.Table()).AddPrimary(n.ID.Column()) + for _, f := range n.Fields { + table.Columns = append(table.Columns, f.Column()) + } + tables[table.Name] = table + all = append(all, table) + } + for _, n := range g.Nodes { + // foreign key + reference OR join table. + for _, e := range n.Edges { + if e.IsInverse() { + continue + } + switch e.Rel.Type { + case O2O, O2M: + // "owner" is the table that owns the relations (we set the foreign-key on) + // and "ref" is the referenced table. 
+ owner, ref := tables[e.Rel.Table], tables[n.Table()] + column := &schema.Column{Name: e.Rel.Column(), Type: field.TypeInt, Unique: e.Rel.Type == O2O, Nullable: &nullable} + owner.Columns = append(owner.Columns, column) + owner.ForeignKeys = append(owner.ForeignKeys, &schema.ForeignKey{ + RefTable: ref, + OnDelete: schema.SetNull, + Columns: []*schema.Column{column}, + RefColumns: []*schema.Column{ref.PrimaryKey[0]}, + Symbol: fmt.Sprintf("%s_%s_%s", owner.Name, ref.Name, e.Name), + }) + case M2O: + ref, owner := tables[e.Type.Table()], tables[e.Rel.Table] + column := &schema.Column{Name: e.Rel.Column(), Type: field.TypeInt, Nullable: &nullable} + owner.Columns = append(owner.Columns, column) + owner.ForeignKeys = append(owner.ForeignKeys, &schema.ForeignKey{ + RefTable: ref, + OnDelete: schema.SetNull, + Columns: []*schema.Column{column}, + RefColumns: []*schema.Column{ref.PrimaryKey[0]}, + Symbol: fmt.Sprintf("%s_%s_%s", owner.Name, ref.Name, e.Name), + }) + case M2M: + t1, t2 := tables[n.Table()], tables[e.Type.Table()] + c1 := &schema.Column{Name: e.Rel.Columns[0], Type: field.TypeInt} + c2 := &schema.Column{Name: e.Rel.Columns[1], Type: field.TypeInt} + all = append(all, &schema.Table{ + Name: e.Rel.Table, + Columns: []*schema.Column{c1, c2}, + PrimaryKey: []*schema.Column{c1, c2}, + ForeignKeys: []*schema.ForeignKey{ + { + RefTable: t1, + OnDelete: schema.Cascade, + Columns: []*schema.Column{c1}, + RefColumns: []*schema.Column{t1.PrimaryKey[0]}, + Symbol: fmt.Sprintf("%s_%s", e.Rel.Table, c1.Name), + }, + { + RefTable: t2, + OnDelete: schema.Cascade, + Columns: []*schema.Column{c2}, + RefColumns: []*schema.Column{t2.PrimaryKey[0]}, + Symbol: fmt.Sprintf("%s_%s", e.Rel.Table, c2.Name), + }, + }, + }) + } + } + } + return +} + +func (g *Graph) typ(name string) (*Type, bool) { + for _, n := range g.Nodes { + if name == n.Name { + return n, true + } + } + return nil, false +} + +func imports() map[string]string { + var ( + specs = make(map[string]string) + b = 
bytes.NewBuffer([]byte("package main\n")) + ) + check(templates.ExecuteTemplate(b, "import", Type{}), "load imports") + f, err := parser.ParseFile(token.NewFileSet(), "", b, parser.ImportsOnly) + check(err, "parse imports") + for _, spec := range f.Imports { + path, err := strconv.Unquote(spec.Path.Value) + check(err, "unquote import path") + specs[filepath.Base(path)] = path + } + return specs +} + +// expect panic if the condition is false. +func expect(cond bool, msg string, args ...interface{}) { + if !cond { + panic(graphError{fmt.Sprintf(msg, args...)}) + } +} + +// check panics if the error is not nil. +func check(err error, msg string, args ...interface{}) { + if err != nil { + args = append(args, err) + panic(graphError{fmt.Sprintf(msg+": %s", args...)}) + } +} + +type graphError struct { + msg string +} + +func (p graphError) Error() string { return fmt.Sprintf("entc/gen: %s", p.msg) } + +func catch(err *error) { + if e := recover(); e != nil { + gerr, ok := e.(graphError) + if !ok { + panic(e) + } + *err = gerr + } +} + +// run runs an exec command and returns the stderr if it failed. 
+func run(cmd *exec.Cmd) error { + out := bytes.NewBuffer(nil) + cmd.Stderr = out + if err := cmd.Run(); err != nil { + return fmt.Errorf("entc/gen: %s", out) + } + return nil +} diff --git a/entc/gen/graph_test.go b/entc/gen/graph_test.go new file mode 100644 index 000000000..367a74dca --- /dev/null +++ b/entc/gen/graph_test.go @@ -0,0 +1,185 @@ +package gen + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "fbc/ent" + "fbc/ent/edge" + "fbc/ent/field" + + "github.com/stretchr/testify/require" +) + +type T1 struct { + ent.Schema +} + +func (T1) Fields() []ent.Field { + return []ent.Field{ + field.Int("age").Optional(), + field.Time("expired_at").Nullable(), + field.String("name").Default("hello"), + } +} + +func (T1) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("t2", T2.Type).Required(), + edge.To("t1", T1.Type).Unique(), + // Bidirectional unique edge (unique/"has-a" in both sides). + edge.To("t2_o2o", T2.Type).Unique(), + // Unidirectional non-unique edge ("has-many"). The reference is on the "many" side. + // For example: A user "has-many" books, but a book "has-an" owner (and only one). + edge.To("o2m", T2.Type), + // Unidirectional unique edge ("has-one"). + // For example: A user "has-an" address (and only one), but an address "has-many" users. + edge.To("m2o", T2.Type).Unique(), + // Bidirectional unique edge ("has-one" in T1 side, and "has-many" in T2 side). + edge.To("t2_m2o", T2.Type).Unique(), + // Bidirectional non-unique edge ("has-many" in T1 side, and "has-one" in T2 side). + edge.To("t2_o2m", T2.Type), + // Bidirectional non-unique edge ("has-many" in both side). + edge.To("t2_m2m", T2.Type), + // Unidirectional non-unique edge for the same type. 
+ edge.To("t1_m2m", T1.Type), + } +} + +type T2 struct { + ent.Schema +} + +func (T2) Fields() []ent.Field { + return []ent.Field{ + field.Bool("active"), + } +} + +func (T2) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("t1", T1.Type).Ref("t2"), + edge.From("t1_o2o", T1.Type).Unique().Ref("t2_o2o"), + edge.From("t1_o2m", T1.Type).Ref("t2_m2o"), + edge.From("t1_m2o", T1.Type).Ref("t2_o2m").Unique(), + edge.From("t1_m2m", T1.Type).Ref("t2_m2m"), + } +} + +func TestNewGraph(t *testing.T) { + require := require.New(t) + graph, err := NewGraph(Config{Package: "entc/gen"}, T1{}) + require.Error(err, "should fail due to missing types") + + graph, err = NewGraph(Config{Package: "entc/gen"}, T1{}, T2{}) + require.NoError(err) + require.NotNil(graph) + require.Len(graph.Nodes, 2) + + t1 := graph.Nodes[0] + + // check fields. + require.Equal("T1", t1.Name) + require.Len(t1.Fields, 3) + for i, name := range []string{"age", "expired_at", "name"} { + require.Equal(name, t1.Fields[i].Name) + } + for i, typ := range []string{"int", "time.Time", "string"} { + require.Equal(typ, t1.Fields[i].Type.String()) + } + for i, optional := range []bool{true, false, false} { + require.Equal(optional, t1.Fields[i].Optional) + } + for i, nullable := range []bool{false, true, false} { + require.Equal(nullable, t1.Fields[i].Nullable) + } + for i, value := range []interface{}{nil, nil, "hello"} { + require.Equal(value, t1.Fields[i].Default) + require.Equal(value != nil, t1.Fields[i].HasDefault()) + } + + // check edges. 
+ require.Len(t1.Edges, 9) + for i, name := range []string{"t2", "t1"} { + require.Equal(name, t1.Edges[i].Name) + } + for i, typ := range []*Type{graph.Nodes[1], graph.Nodes[0]} { + require.Equal(typ, t1.Edges[i].Type, "edge should point to the right type") + } + for i, optional := range []bool{false, true} { + require.Equal(optional, t1.Edges[i].Optional) + } + for i, unique := range []bool{false, true} { + require.Equal(unique, t1.Edges[i].Unique) + } + for i, inverse := range []bool{false, false} { + require.Equal(inverse, t1.Edges[i].IsInverse()) + } + + t2 := graph.Nodes[1] + f1, e1 := t2.Fields[0], t2.Edges[0] + require.Equal("bool", f1.Type.String()) + require.Equal("active", f1.Name) + require.Equal("t1", e1.Name) + require.True(e1.IsInverse()) + require.Equal("t2", e1.Inverse) + require.Equal(graph.Nodes[0], e1.Type) +} + +func TestRelation(t *testing.T) { + require := require.New(t) + graph, err := NewGraph(Config{Package: "entc/gen"}, T1{}) + require.Error(err, "should fail due to missing types") + + graph, err = NewGraph(Config{Package: "entc/gen"}, T1{}, T2{}) + require.NoError(err) + require.NotNil(graph) + require.Len(graph.Nodes, 2) + + t1, t2 := graph.Nodes[0], graph.Nodes[1] + // unidirectional one 2 one. + require.Equal(O2O, t1.Edges[1].Rel.Type) + // bidirectional one to one. + require.Equal(O2O, t1.Edges[2].Rel.Type) + require.Equal(O2O, t2.Edges[1].Rel.Type) + // unidirectional one 2 many. + require.Equal(O2M, t1.Edges[3].Rel.Type) + // unidirectional many 2 one. + require.Equal(M2O, t1.Edges[4].Rel.Type) + // bidirectional many 2 one. + require.Equal(M2O, t1.Edges[5].Rel.Type) + require.Equal(O2M, t2.Edges[2].Rel.Type) + // bidirectional one 2 many. + require.Equal(O2M, t1.Edges[6].Rel.Type) + require.Equal(M2O, t2.Edges[3].Rel.Type) + // bidirectional many 2 many. + require.Equal(M2M, t1.Edges[7].Rel.Type) + require.Equal(M2M, t2.Edges[4].Rel.Type) + // unidirectional many 2 many. 
+ require.Equal(M2M, t1.Edges[8].Rel.Type) +} + +func TestGraph_Gen(t *testing.T) { + require := require.New(t) + target := filepath.Join(os.TempDir(), "ent") + require.NoError(os.MkdirAll(target, os.ModePerm), "creating tmpdir") + defer os.Remove(target) + graph, err := NewGraph(Config{Package: "entc/gen", Target: target}, T1{}, T2{}) + require.NoError(err) + require.NotNil(graph) + require.NoError(graph.Gen()) + // ensure graph files were generated. + for _, name := range []string{"ent", "client", "config", "example_test"} { + _, err := os.Stat(fmt.Sprintf("%s/%s.go", target, name)) + require.NoError(err) + } + // ensure entity files were generated. + for _, format := range []string{"%s", "%s_create", "%s_update", "%s_delete", "%s_query"} { + for _, name := range []string{"t1", "t2"} { + _, err := os.Stat(fmt.Sprintf(fmt.Sprintf("%s/%s.go", target, format), name)) + require.NoError(err) + } + } +} diff --git a/entc/gen/template.go b/entc/gen/template.go new file mode 100644 index 000000000..e8c0b08dd --- /dev/null +++ b/entc/gen/template.go @@ -0,0 +1,95 @@ +package gen + +import ( + "fmt" + "text/template" +) + +//go:generate go-bindata -pkg=gen ./template/... + +var ( + // Templates holds the template information for a file that the graph is generating. + Templates = []struct { + Name string + Format func(*Type) string + }{ + { + Name: "create", + Format: pkgf("%s_create.go"), + }, + { + Name: "update", + Format: pkgf("%s_update.go"), + }, + { + Name: "delete", + Format: pkgf("%s_delete.go"), + }, + { + Name: "query", + Format: pkgf("%s_query.go"), + }, + { + Name: "model", + Format: pkgf("%s.go"), + }, + { + Name: "where", + Format: pkgf("%s/where.go"), + }, + { + Name: "meta", + Format: func(t *Type) string { + return fmt.Sprintf("%s/%s.go", t.Package(), t.Package()) + }, + }, + } + // GraphTemplates holds the templates applied on the graph. 
+ GraphTemplates = []struct { + Name string + Format string + }{ + { + Name: "base", + Format: "ent.go", + }, + { + Name: "client", + Format: "client.go", + }, + { + Name: "tx", + Format: "tx.go", + }, + { + Name: "config", + Format: "config.go", + }, + { + Name: "migrate", + Format: "migrate/migrate.go", + }, + { + Name: "schema", + Format: "migrate/schema.go", + }, + { + Name: "example", + Format: "example_test.go", + }, + } + // templates holds the Go templates for the code generation. + templates = tmpl() +) + +func tmpl() *template.Template { + t := template.New("templates").Funcs(funcs) + for _, asset := range AssetNames() { + t = template.Must(t.Parse(string(MustAsset(asset)))) + } + return t +} + +func pkgf(s string) func(t *Type) string { + return func(t *Type) string { return fmt.Sprintf(s, t.Package()) } +} diff --git a/entc/gen/template/base.tmpl b/entc/gen/template/base.tmpl new file mode 100644 index 000000000..477db8702 --- /dev/null +++ b/entc/gen/template/base.tmpl @@ -0,0 +1,247 @@ +{{ define "base" }} + +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +// Predicate is an alias to ent.Predicate. +type Predicate = ent.Predicate + +// Or groups list of predicates with the or operator between them. +func Or(predicates ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + for i, p := range predicates { + if i > 0 { + s.Or() + } + p.SQL(s) + } + }, + Gremlin: func(tr *dsl.Traversal) { + trs := make([]interface{}, 0, len(predicates)) + for _, p := range predicates { + t := __.New() + p.Gremlin(t) + trs = append(trs, t) + } + tr.Where(__.Or(trs...)) + }, + } +} + +// Not applies the not operator on the given predicate. 
+func Not(p ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + p.SQL(s.Not()) + }, + Gremlin: func(tr *dsl.Traversal) { + t := __.New() + p.Gremlin(t) + tr.Where(__.Not(t)) + }, + } +} + +// Order applies an ordering on the traversal. +type Order ent.Predicate + +{{ range $f, $order := order }} + {{ $f = pascal $f }} + // {{ $f }} applies the given fields in {{ upper $f }} order. + func {{ $f }}(fields ...string) Order { + return Order{ + SQL: func(s *sql.Selector) { + for _, f := range fields { + s.OrderBy(sql.{{ $f }}(f)) + } + }, + Gremlin: func(tr *dsl.Traversal) { + for _, f := range fields { + tr.By(f, dsl.{{ pascal $order }}) + } + }, + } + } +{{ end }} + +// Aggregate applies an aggregation step on the group-by traversal/selector. +type Aggregate struct { + // SQL the column wrapped with the aggregation function. + SQL func(*sql.Selector) string + // Gremlin gets two labels as parameters. The first used in the `As` step for the predicate, + // and the second is an optional name for the next predicates (or for later usage). + Gremlin func(string, string) (string, *dsl.Traversal) +} + + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate({{ $pkg }}.As({{ $pkg }}.Sum(field1), "sum_field1"), ({{ $pkg }}.As({{ $pkg }}.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +// +func As(fn Aggregate, end string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.As(fn.SQL(s), end) + }, + Gremlin: func(start, _ string) (string, *dsl.Traversal) { + return fn.Gremlin(start, end) + }, + } +} + +{{ range $name, $withField := aggregate }} + {{ $fn := pascal $name }} + // Default{{ $fn }}Label is the default label name for the {{ $fn }} aggregation function. + // It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. 
+ // In order to {{ quote $name }} 2 or more fields and avoid conflicting, use the `{{ $pkg }}.As({{ $pkg }}.{{ $fn }}(field), "custom_name")` + // function with custom name in order to override it. + const Default{{ $fn }}Label = {{ quote $name }} + + // {{ $fn }} applies the {{ quote $name }} aggregation function on {{ if $withField }}the given field of {{ end }}each group. + func {{ $fn }}({{ if $withField }}field string{{ end }}) Aggregate { + return Aggregate { + SQL: func(s *sql.Selector) string { + return sql.{{ if eq $fn "Mean" }}Avg{{ else }}{{ $fn }}{{ end }}({{ if $withField }}s.C(field){{ else }}"*"{{ end }}) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = Default{{ $fn }}Label + } + return end, __.As(start).{{ if $withField }}Unfold().Values(field).{{ $fn }}(){{ else }}{{ $fn }}(dsl.Local){{ end }}.As(end) + }, + } + } +{{ end }} + +// ErrNotFound returns when trying to fetch a specific entity and it was not found in the database. +type ErrNotFound struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotFound) Error() string { + return fmt.Sprintf("{{ $pkg }}: %s not found", e.label) +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + _, ok := err.(*ErrNotFound) + return ok +} + +// ErrNotSingular returns when trying to fetch a singular entity and more then one was found in the database. +type ErrNotSingular struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotSingular) Error() string { + return fmt.Sprintf("{{ $pkg }}: %s not singular", e.label) +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + _, ok := err.(*ErrNotSingular) + return ok +} + + +// ErrConstraintFailed returns when trying to create/update one or more entities and +// one or more of their constraints failed. 
For example, violation of edge or field uniqueness. +type ErrConstraintFailed struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ErrConstraintFailed) Error() string { + return fmt.Sprintf("{{ $pkg }}: unique constraint failed: %s", e.msg) +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ErrConstraintFailed) Unwrap() error { + return e.wrap +} + +// Code implements the dsl.Node interface. +func (e ErrConstraintFailed) Code() (string, []interface{}) { + return strconv.Quote(e.prefix() + e.msg), nil +} + +func (e *ErrConstraintFailed) UnmarshalGraphson(b []byte) error { + var v [1]*string + if err := graphson.Unmarshal(b, &v); err != nil { + return err + } + if v[0] == nil { + return fmt.Errorf("{{ $pkg }}: missing string value") + } + if !strings.HasPrefix(*v[0], e.prefix()) { + return fmt.Errorf("{{ $pkg }}: invalid string for error: %s", *v[0]) + } + e.msg = strings.TrimPrefix(*v[0], e.prefix()) + return nil +} + +// prefix returns the prefix used for gremlin constants. +func (ErrConstraintFailed) prefix() string { return "Error: " } + +// NewErrUniqueField creates a constraint error for unique fields. +func NewErrUniqueField(label, field string, v interface{}) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("field %s.%s with value: %#v", label, field, v)} +} + +// NewErrUniqueEdge creates a constraint error for unique edges. +func NewErrUniqueEdge(label, edge, id string) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("edge %s.%s with id: %#v", label, edge, id)} +} + +// IsConstraintFailure returns a boolean indicating whether the error is a constraint failure. +func IsConstraintFailure(err error) bool { + _, ok := err.(*ErrConstraintFailed) + return ok +} + +// isConstantError indicates if the given response holds a gremlin constant containing an error. 
+func isConstantError(r *gremlin.Response) (*ErrConstraintFailed, bool) { + e := &ErrConstraintFailed{} + if err := graphson.Unmarshal(r.Result.Data, e); err != nil { + return nil, false + } + return e, true +} + +func isSQLConstraintError(err error) (*ErrConstraintFailed, bool) { + // Error number 1062 is ER_DUP_ENTRY in mysql, and "UNIQUE constraint failed" is SQLite prefix. + if msg := err.Error(); strings.HasPrefix(msg, "Error 1062") || strings.HasPrefix(msg, "UNIQUE constraint failed") { + return &ErrConstraintFailed{msg, err}, true + } + return nil, false +} + +// rollback calls to tx.Rollback and wraps the given error with the rollback error if occurred. +func rollback(tx dialect.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%s: %v", err.Error(), rerr) + } + if err, ok := isSQLConstraintError(err); ok { + return err + } + return err +} + +{{ $id := (index $.Nodes 0).ID.Type }} +// keys returns the keys/ids from the edge map. +func keys(m map[{{ $id }}]struct{}) []{{ $id }} { + s := make([]{{ $id }}, 0, len(m)) + for id, _ := range m { + s = append(s, id) + } + return s +} +{{ end }} diff --git a/entc/gen/template/builder/create.tmpl b/entc/gen/template/builder/create.tmpl new file mode 100644 index 000000000..abd6e0454 --- /dev/null +++ b/entc/gen/template/builder/create.tmpl @@ -0,0 +1,87 @@ +{{ define "create" }} +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +{{ $builder := print (pascal $.Name) "Create" }} +{{ $receiver := receiver $builder }} + +// {{ $builder }} is the builder for creating a {{ $.Name }} entity. +type {{ $builder }} struct { + config + {{ range $_, $f := $.Fields }} + {{- $f.StructField }} *{{ $f.Type }} + {{ end }} + {{- range $_, $e := $.Edges }} + {{- $e.StructField }} map[{{ $.ID.Type }}]struct{} + {{ end -}} +} + +{{ with extend $ "Builder" $builder }} + {{ template "setter" . }} +{{ end }} + +// Save creates the {{ $.Name }} in the database. 
+func ({{ $receiver }} *{{ $builder }}) Save(ctx context.Context) (*{{ $.Name }}, error) { + {{ range $_, $f := $.Fields -}} + {{- if or $f.HasDefault (not $f.Optional) -}} + if {{ $receiver }}.{{ $f.StructField }} == nil { + {{ if $f.HasDefault -}} + v := {{ $.Package }}.{{ $f.DefaultConstant }} + {{ $receiver }}.{{ $f.StructField }} = &v + {{ else -}} + return nil, errors.New("{{ $pkg }}: missing required field \"{{ $f.Name }}\"") + {{ end -}} + } + {{ end -}} + {{ with $f.Validators -}} + {{/* add nullable check only for optional fields without default value */ -}} + {{ $nullable := and $f.Optional (not $f.HasDefault) -}} + {{- if $nullable }} if {{ $receiver }}.{{ $f.StructField }} != nil { {{ end -}} + if err := {{ $.Package }}.{{ $f.Validator }}(*{{ $receiver }}.{{ $f.StructField }}); err != nil { + return nil, fmt.Errorf("{{ $pkg }}: validator failed for field \"{{ $f.Name }}\": %v", err) + } + {{- if $nullable }} } {{ end }} + {{ end -}} + {{ end -}} + {{- range $_, $e := $.Edges }} + {{- if $e.Unique -}} + if len({{ $receiver }}.{{ $e.StructField }}) > 1 { + return nil, errors.New("{{ $pkg }}: multiple assignments on a unique edge \"{{ $e.Name }}\"") + } + {{ end -}} + {{- if not $e.Optional -}} + if {{ $receiver }}.{{ $e.StructField }} == nil { + return nil, errors.New("{{ $pkg }}: missing required edge \"{{ $e.Name }}\"") + } + {{ end -}} + {{ end -}} + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlSave(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinSave(ctx) + default: + return nil, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. +func ({{ $receiver }} *{{ $builder }}) SaveX(ctx context.Context) *{{ $.Name }} { + v, err := {{ $receiver }}.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/sql/create" . 
}} +{{ end }} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/gremlin/create" . }} +{{ end }} + +{{ end }} diff --git a/entc/gen/template/builder/delete.tmpl b/entc/gen/template/builder/delete.tmpl new file mode 100644 index 000000000..c5cbb81f4 --- /dev/null +++ b/entc/gen/template/builder/delete.tmpl @@ -0,0 +1,67 @@ +{{ define "delete" }} +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +{{ $builder := print (pascal $.Name) "Delete" }} +{{ $receiver := receiver $builder }} +// {{ $builder }} is the builder for deleting a {{ pascal $.Name }} entity. +type {{ $builder }} struct { + config + predicates []ent.Predicate +} + + +// Where adds a new predicate for the builder. +func ({{ $receiver}} *{{ $builder }}) Where(ps ...ent.Predicate) *{{ $builder }} { + {{ $receiver}}.predicates = append({{ $receiver}}.predicates, ps...) + return {{ $receiver }} +} + +// Exec executes the deletion query. +func ({{ $receiver}} *{{ $builder }}) Exec(ctx context.Context) error { + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlExec(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinExec(ctx) + default: + return errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) ExecX(ctx context.Context) { + if err := {{ $receiver }}.Exec(ctx); err != nil { + panic(err) + } +} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/sql/delete" . }} +{{ end }} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/gremlin/delete" . }} +{{ end }} + +{{ $onebuilder := print $builder "One" }} +{{ $oneReceiver := receiver $onebuilder }} + +// {{ $onebuilder }} is the builder for deleting a single {{ $.Name }} entity. +type {{ $onebuilder }} struct { + {{ $receiver }} *{{ $builder }} +} + +// Exec executes the deletion query. 
+func ({{ $oneReceiver }} *{{ $onebuilder }}) Exec(ctx context.Context) error { + return {{ $oneReceiver }}.{{ $receiver }}.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func ({{ $oneReceiver }} *{{ $onebuilder }}) ExecX(ctx context.Context) { + {{ $oneReceiver }}.{{ $receiver }}.ExecX(ctx) +} + +{{ end }} diff --git a/entc/gen/template/builder/query.tmpl b/entc/gen/template/builder/query.tmpl new file mode 100644 index 000000000..8fff8f040 --- /dev/null +++ b/entc/gen/template/builder/query.tmpl @@ -0,0 +1,397 @@ +{{ define "query" }} +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +{{ $builder := print (pascal $.Name) "Query" }} +{{ $receiver := receiver $builder }} + +// {{ $builder }} is the builder for querying {{ pascal $.Name }} entities. +type {{ $builder }} struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func ({{ $receiver }} *{{ $builder }}) Where(ps ...ent.Predicate) *{{ $builder }} { + {{ $receiver}}.predicates = append({{ $receiver }}.predicates, ps...) + return {{ $receiver }} +} + +// Limit adds a limit step to the query. +func ({{ $receiver }} *{{ $builder }}) Limit(limit int) *{{ $builder }} { + {{ $receiver }}.limit = &limit + return {{ $receiver }} +} + +// Order adds an order step to the query. +func ({{ $receiver }} *{{ $builder }}) Order(o ...Order) *{{ $builder }} { + {{ $receiver }}.order = append({{ $receiver }}.order, o...) + return {{ $receiver }} +} + +{{/* this code has similarity with edge queries in client.tmpl */}} +{{ range $_, $e := $.Edges }} + {{ $edge_builder := print (pascal $e.Type.Name) "Query" }} + // Query{{ pascal $e.Name }} chains the current query on the {{ $e.Name }} edge. 
+ func ({{ $receiver }} *{{ $builder }}) Query{{ pascal $e.Name }}() *{{ $edge_builder }} { + query := &{{ $edge_builder }}{config: {{ $receiver }}.config} + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + {{- if $e.M2M }} + {{ $i := 1 }}{{ $j := 0 }}{{- if $e.IsInverse }}{{ $i = 0 }}{{ $j = 1 }}{{ end -}} + t1 := sql.Table({{ $e.Type.Package }}.Table) + t2 := {{ $receiver }}.sqlQuery() + t2.Select(t2.C({{ $.Package }}.{{ $.ID.Constant }})) + t3 := sql.Table({{ $.Package }}.{{ $e.TableConstant }}) + t4 := sql.Select(t3.C({{ $.Package }}.{{ $e.PKConstant }}[{{ $i }}])). + From(t3). + Join(t2). + On(t3.C({{ $.Package }}.{{ $e.PKConstant }}[{{ $j }}]), t2.C({{ $.Package }}.{{ $.ID.Constant }})) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}), t4.C({{ $.Package }}.{{ $e.PKConstant }}[{{ $i }}])) + {{- else if or $e.M2O (and $e.O2O $e.IsInverse) }}{{/* M2O || (O2O with inverse edge) */}} + t1 := sql.Table({{ $e.Type.Package }}.Table) + t2 := {{ $receiver }}.sqlQuery() + t2.Select(t2.C({{ $.Package }}.{{ $e.ColumnConstant }})) + query.sql = sql.Select(t1.Columns({{ $e.Type.Package }}.Columns...)...). + From(t1). + Join(t2). + On(t1.C({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}), t2.C({{ $.Package }}.{{ $e.ColumnConstant }})) + {{- else }}{{/* O2M || (O2O with assoc edge) */}} + t1 := sql.Table({{ $e.Type.Package }}.Table) + t2 := {{ $receiver }}.sqlQuery() + t2.Select(t2.C({{ $.Package }}.{{ $.ID.Constant }})) + query.sql = sql.Select(). + From(t1). + Join(t2). 
+ On(t1.C({{ $.Package }}.{{ $e.ColumnConstant }}), t2.C({{ $.Package }}.{{ $.ID.Constant }})) + {{- end }} + case dialect.Neptune: + gremlin := {{ $receiver }}.gremlinQuery() + {{- if $e.SelfRef }} + query.gremlin = gremlin.Both({{ $.Package }}.{{ $e.Constant }}) + {{- else if $e.IsInverse }} + query.gremlin = gremlin.InE({{ $e.Type.Package }}.{{ $e.Constant }}).OutV() + {{- else }} + query.gremlin = gremlin.OutE({{ $.Package }}.{{ $e.Constant }}).InV() + {{- end }} + } + return query + } +{{ end }} + + +// Get returns a {{ $.Name }} entity by its id. +func ({{ $receiver }} *{{ $builder }}) Get(ctx context.Context, id {{ $.ID.Type }}) (*{{ $.Name }}, error) { + return {{ $receiver }}.Where({{ $.Package }}.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) GetX(ctx context.Context, id {{ $.ID.Type }}) *{{ $.Name }} { + {{ $.Receiver }}, err := {{ $receiver }}.Get(ctx, id) + if err != nil { + panic(err) + } + return {{ $.Receiver }} +} + +// First returns the first {{ $.Name }} entity in the query. Returns *ErrNotFound when no {{ lower $.Name }} was found. +func ({{ $receiver }} *{{ $builder }}) First(ctx context.Context) (*{{ $.Name }}, error) { + {{ plural $.Receiver }}, err := {{ $receiver }}.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len({{ plural $.Receiver }}) == 0 { + return nil, &ErrNotFound{ {{ $.Package }}.Label} + } + return {{ plural $.Receiver }}[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) FirstX(ctx context.Context) *{{ $.Name }} { + {{ $.Receiver }}, err := {{ $receiver }}.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return {{ $.Receiver }} +} + +// FirstID returns the first {{ $.Name }} id in the query. Returns *ErrNotFound when no id was found. 
+func ({{ $receiver }} *{{ $builder }}) FirstID(ctx context.Context) (id {{ $.ID.Type }}, err error) { + var ids []{{ $.ID.Type }} + if ids, err = {{ $receiver }}.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{ {{ $.Package }}.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) FirstXID(ctx context.Context) {{ $.ID.Type }} { + id, err := {{ $receiver }}.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only {{ $.Name }} entity in the query, returns an error if not exactly one entity was returned. +func ({{ $receiver }} *{{ $builder }}) Only(ctx context.Context) (*{{ $.Name }}, error) { + {{ plural $.Receiver }}, err := {{ $receiver }}.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len({{ plural $.Receiver }}) { + case 1: + return {{ plural $.Receiver }}[0], nil + case 0: + return nil, &ErrNotFound{ {{ $.Package }}.Label} + default: + return nil, &ErrNotSingular{ {{ $.Package }}.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) OnlyX(ctx context.Context) *{{ $.Name }} { + {{ $.Receiver }}, err := {{ $receiver }}.Only(ctx) + if err != nil { + panic(err) + } + return {{ $.Receiver }} +} + +// OnlyID returns the only {{ $.Name }} id in the query, returns an error if not exactly one id was returned. +func ({{ $receiver }} *{{ $builder }}) OnlyID(ctx context.Context) (id {{ $.ID.Type }}, err error) { + var ids []{{ $.ID.Type }} + if ids, err = {{ $receiver }}.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{ {{ $.Package }}.Label} + default: + err = &ErrNotSingular{ {{ $.Package }}.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. 
+func ({{ $receiver }} *{{ $builder }}) OnlyXID(ctx context.Context) {{ $.ID.Type }} { + id, err := {{ $receiver }}.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of {{ plural $.Name }}. +func ({{ $receiver }} *{{ $builder }}) All(ctx context.Context) ([]*{{ $.Name }}, error) { + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlAll(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinAll(ctx) + default: + return nil, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) AllX(ctx context.Context) []*{{ $.Name }} { + {{ plural $.Receiver }}, err := {{ $receiver }}.All(ctx) + if err != nil { + panic(err) + } + return {{ plural $.Receiver }} +} + +// IDs executes the query and returns a list of {{ $.Name }} ids. +func ({{ $receiver }} *{{ $builder }}) IDs(ctx context.Context) ([]{{ $.ID.Type }}, error) { + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlIDs(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinIDs(ctx) + default: + return nil, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) IDsX(ctx context.Context) []{{ $.ID.Type }} { + ids, err := {{ $receiver }}.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func ({{ $receiver }} *{{ $builder }}) Count(ctx context.Context) (int, error) { + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlCount(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinCount(ctx) + default: + return 0, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. 
+func ({{ $receiver }} *{{ $builder }}) CountX(ctx context.Context) int { + count, err := {{ $receiver }}.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func ({{ $receiver }} *{{ $builder }}) Exist(ctx context.Context) (bool, error) { + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlExist(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinExist(ctx) + default: + return false, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) ExistX(ctx context.Context) bool { + exist, err := {{ $receiver }}.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +{{ $groupBuilder := pascal $.Name | printf "%sGroupBy" }} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: {{ join (keys aggregate) ", " }}. +{{- with len $.Fields }} +{{- $f := index $.Fields 0 }} +// +// Example: +// +// var v []struct { +// {{ pascal $f.Name }} {{ $f.Type }} `{{ $f.StructTag }}` +// Count int `json:"count,omitempty"` +// } +// +// client.{{ pascal $.Name }}.Query(). +// GroupBy({{ $.Package }}.{{ $f.Constant }}). +// Aggregate({{ $pkg }}.Count()). +// Scan(ctx, &v) +// +{{- end }} +func ({{ $receiver }} *{{ $builder }}) GroupBy(field string, fields ...string) *{{ $groupBuilder }} { + group := &{{ $groupBuilder }}{config: {{ $receiver }}.config} + group.fields = append([]string{field}, fields...) + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = {{ $receiver }}.sqlQuery() + case dialect.Neptune: + group.gremlin = {{ $receiver }}.gremlinQuery() + } + return group +} + + +{{ with extend $ "Builder" $builder "Package" $pkg }} + {{ template "dialect/sql/query" . 
}} +{{ end }} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/gremlin/query" . }} +{{ end }} + + +{{ $groupReceiver := receiver $groupBuilder }} + +// {{ $builder }} is the builder for group-by {{ pascal $.Name }} entities. +type {{ $groupBuilder }} struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func ({{ $groupReceiver }} *{{ $groupBuilder }}) Aggregate(fns ...Aggregate) *{{ $groupBuilder }} { + {{ $groupReceiver }}.fns = append({{ $groupReceiver }}.fns, fns...) + return {{ $groupReceiver }} +} + +// Scan applies the group-by query and scan the result into the given value. +func ({{ $groupReceiver }} *{{ $groupBuilder }}) Scan(ctx context.Context, v interface{}) error { + switch {{ $groupReceiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $groupReceiver }}.sqlScan(ctx, v) + case dialect.Neptune: + return {{ $groupReceiver }}.gremlinScan(ctx, v) + default: + return errors.New("{{ $groupReceiver }}: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func ({{ $groupReceiver }} *{{ $groupBuilder }}) ScanX(ctx context.Context, v interface{}) { + if err := {{ $groupReceiver }}.Scan(ctx, v); err != nil { + panic(err) + } +} + +{{ range $_, $t := primitives }} + {{ $f := pascal $t | plural }} + // {{ $f }} returns list of {{ plural $t }} from group-by. It is only allowed when querying group-by with one field. 
+ func ({{ $groupReceiver }} *{{ $groupBuilder }}) {{ $f }}(ctx context.Context) ([]{{ $t }}, error) { + if len({{ $groupReceiver }}.fields) > 1 { + return nil, errors.New("{{ $pkg }}: {{ $groupBuilder }}.{{ $f }} is not achievable when grouping more than 1 field") + } + var v []{{ $t }} + if err := {{ $groupReceiver }}.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil + } + + // {{ $f }}X is like {{ $f }}, but panics if an error occurs. + func ({{ $groupReceiver }} *{{ $groupBuilder }}) {{ $f }}X(ctx context.Context) []{{ $t }} { + v, err := {{ $groupReceiver }}.{{ $f }}(ctx) + if err != nil { + panic(err) + } + return v + } +{{ end }} + +{{ with extend $ "Builder" $groupBuilder }} + {{ template "dialect/sql/group" . }} +{{ end }} + +{{ with extend $ "Builder" $groupBuilder }} + {{ template "dialect/gremlin/group" . }} +{{ end }} + +{{ end }} diff --git a/entc/gen/template/builder/setter.tmpl b/entc/gen/template/builder/setter.tmpl new file mode 100644 index 000000000..9e519d037 --- /dev/null +++ b/entc/gen/template/builder/setter.tmpl @@ -0,0 +1,69 @@ +{{ define "setter" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +{{ range $_, $f := $.Fields }} + {{ $p := lower (printf "%.1s" $f.Type) }} + {{ $func := print "Set" (pascal $f.Name) }} + // {{ $func }} sets the {{ $f.Name }} field. + func ({{ $receiver }} *{{ $builder }}) {{ $func }}({{ $p }} {{ $f.Type }}) *{{ $builder }} { + {{ $receiver }}.{{ $f.StructField }} = &{{ $p }} + return {{ $receiver }} + } + {{ if or $f.Optional $f.HasDefault }} + {{ $nillableFunc := print "SetNillable" (pascal $f.Name) }} + // {{ $nillableFunc }} sets the {{ $f.Name }} field if the given value is not nil. 
+ func ({{ $receiver }} *{{ $builder }}) {{ $nillableFunc }}({{ $p }} *{{ $f.Type }}) *{{ $builder }} { + if {{ $p }} != nil { + {{ $receiver }}.{{ $func }}(*{{ $p }}) + } + return {{ $receiver }} + } + {{ end }} +{{ end }} + +{{ range $_, $e := $.Edges }} + {{ $op := "add" }}{{ if $e.Unique }}{{ $op = "set" }}{{ end }} + {{ $idsFunc := print (pascal $op) (singular $e.Name | pascal) "IDs" }}{{ if $e.Unique }}{{ $idsFunc = print (pascal $op) (pascal $e.Name) "ID" }}{{ end }} + // {{ $idsFunc }} {{ $op }}s the {{ $e.Name }} edge to {{ $e.Type.Name }} by id{{ if not $e.Unique }}s{{ end }}. + func ({{ $receiver }} *{{ $builder }}) {{ $idsFunc }}({{ if $e.Unique }}id{{ else }}ids ...{{ end }} {{ $.ID.Type }}) *{{ $builder }} { + if {{ $receiver }}.{{ $e.StructField }} == nil { + {{ $receiver }}.{{ $e.StructField }} = make(map[{{ $.ID.Type }}]struct{}) + } + {{ if $e.Unique -}} + {{ $receiver }}.{{ $e.StructField }}[id] = struct{}{} + {{- else -}} + for i := range ids { + {{ $receiver }}.{{ $e.StructField }}[ids[i]] = struct{}{} + } + {{- end }} + return {{ $receiver }} + } + {{ if and $e.Unique $e.Optional }} + {{ $nillableIDsFunc := print "SetNillable" (pascal $e.Name) "ID" }} + // {{ $nillableIDsFunc }} sets the {{ $e.Name }} edge to {{ $e.Type.Name }} by id if the given value is not nil. + func ({{ $receiver }} *{{ $builder }}) {{ $nillableIDsFunc }}(id *{{ $.ID.Type }}) *{{ $builder }} { + if id != nil { + {{ $receiver}} = {{ $receiver }}.{{ $idsFunc }}(*id) + } + return {{ $receiver }} + } + {{ end }} + {{ $p := lower (printf "%.1s" $e.Type.Name) }} + {{ if eq $p $receiver }} {{ $p = "v" }} {{ end }} + {{ $func := print (pascal $op) (pascal $e.Name) }} + // {{ $func }} {{ $op }}s the {{ $e.Name }} edge{{if not $e.Unique}}s{{ end }} to {{ $e.Type.Name }}. 
+ func ({{ $receiver }} *{{ $builder }}) {{ $func }}({{ $p }} {{ if not $e.Unique }}...{{ end }}*{{ $e.Type.Name}}) *{{ $builder }} { + {{ if $e.Unique -}} + return {{ $receiver }}.{{ $idsFunc }}({{ $p }}.ID) + {{- else -}} + ids := make([]{{ $.ID.Type }}, len({{ $p }})) + {{ $i := "i" }}{{ if eq $i $p }}{{ $i = "j" }}{{ end -}} + for {{ $i }} := range {{ $p }} { + ids[{{ $i }}] = {{ $p }}[{{ $i }}].ID + } + return {{ $receiver }}.{{ $idsFunc }}(ids...) + {{- end }} + } +{{ end }} +{{ end }} diff --git a/entc/gen/template/builder/update.tmpl b/entc/gen/template/builder/update.tmpl new file mode 100644 index 000000000..6b4e3ebc9 --- /dev/null +++ b/entc/gen/template/builder/update.tmpl @@ -0,0 +1,224 @@ +{{ define "update" }} +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +{{ $builder := print (pascal $.Name) "Update" }} +{{ $receiver := receiver $builder }} +// {{ $builder }} is the builder for updating {{ $.Name }} entities. +type {{ $builder }} struct { + config + {{- template "update/fields" $ -}} + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func ({{ $receiver}} *{{ $builder }}) Where(ps ...ent.Predicate) *{{ $builder }} { + {{ $receiver}}.predicates = append({{ $receiver}}.predicates, ps...) + return {{ $receiver }} +} + +{{ with extend $ "Builder" $builder }} + {{ template "setter" . }} +{{ end }} + +{{ with extend $ "Builder" $builder }} + {{ template "update/edges" . }} +{{ end }} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func ({{ $receiver }} *{{ $builder }}) Save(ctx context.Context) (int, error) { + {{ with extend $ "Receiver" $receiver "Package" $pkg "ZeroValue" 0 -}} + {{ template "update/validators" . 
}} + {{- end -}} + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlSave(ctx) + case dialect.Neptune: + vertices, err := {{ $receiver }}.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) SaveX(ctx context.Context) int { + affected, err := {{ $receiver }}.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func ({{ $receiver }} *{{ $builder }}) Exec(ctx context.Context) error { + _, err := {{ $receiver }}.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func ({{ $receiver }} *{{ $builder }}) ExecX(ctx context.Context) { + if err := {{ $receiver }}.Exec(ctx); err != nil { + panic(err) + } +} + +{{ with extend $ "Builder" $builder "Package" $pkg }} + {{ template "dialect/sql/update" . }} +{{ end }} + +{{ with extend $ "Builder" $builder }} + {{ template "dialect/gremlin/update" . }} +{{ end }} + +{{ $onebuilder := printf "%sOne" $builder }} +{{ $receiver = receiver $onebuilder }} + +// {{ $onebuilder }} is the builder for updating a single {{ $.Name }} entity. +type {{ $onebuilder }} struct { + config + id {{ $.ID.Type }} + {{- template "update/fields" $ }} +} + +{{ with extend $ "Builder" $onebuilder }} + {{ template "setter" . }} +{{ end }} + + +{{ with extend $ "Builder" $onebuilder }} + {{ template "update/edges" . }} +{{ end }} + +// Save executes the query and returns the updated entity. +func ({{ $receiver }} *{{ $onebuilder }} ) Save(ctx context.Context) (*{{ $.Name }}, error) { + {{ with extend $ "Receiver" $receiver "Package" $pkg "ZeroValue" "nil" -}} + {{ template "update/validators" . 
}} + {{- end -}} + switch {{ $receiver }}.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return {{ $receiver }}.sqlSave(ctx) + case dialect.Neptune: + return {{ $receiver }}.gremlinSave(ctx) + default: + return nil, errors.New("{{ $pkg }}: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func ({{ $receiver }} *{{ $onebuilder }}) SaveX(ctx context.Context) *{{ $.Name }} { + {{ $.Receiver }}, err := {{ $receiver }}.Save(ctx) + if err != nil { + panic(err) + } + return {{ $.Receiver }} +} + +// Exec executes the query on the entity. +func ({{ $receiver }} *{{ $onebuilder }}) Exec(ctx context.Context) error { + _, err := {{ $receiver }}.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func ({{ $receiver }} *{{ $onebuilder }}) ExecX(ctx context.Context) { + if err := {{ $receiver }}.Exec(ctx); err != nil { + panic(err) + } +} + +{{ with extend $ "Builder" $onebuilder "Package" $pkg }} + {{ template "dialect/sql/update" . }} +{{ end }} + +{{ with extend $ "Builder" $onebuilder }} + {{ template "dialect/gremlin/update" . }} +{{ end }} + +{{ end }} + +{{/* shared struct fields between the twp updaters */}} +{{ define "update/fields"}} +{{ range $_, $f := $.Fields }} + {{- $f.StructField }} *{{ $f.Type }} +{{ end }} +{{- range $_, $e := $.Edges }} + {{- $e.StructField }} map[{{ $.ID.Type }}]struct{} +{{ end }} +{{- range $_, $e := $.Edges }} + {{- $p := "removed" }}{{ if $e.Unique }}{{ $p = "cleared" }}{{ end }} + {{- print $p (pascal $e.Name) }} {{ if $e.Unique }}bool{{ else }}map[{{ $.ID.Type }}]struct{}{{ end }} +{{ end -}} +{{ end }} + +{{/* shared edges removal between the two updaters */}} +{{ define "update/edges" }} +{{ $builder := pascal .Scope.Builder }} +{{ $receiver := receiver $builder }} + +{{ range $_, $e := $.Edges }} + {{ if $e.Unique }} + {{ $func := print "Clear" (pascal $e.Name) }} + // {{ $func }} clears the {{ $e.Name }} edge to {{ $e.Type.Name }}. 
+ func ({{ $receiver }} *{{ $builder }}) {{ $func }}() *{{ $builder }} { + {{ $receiver }}.cleared{{ pascal $e.Name }} = true + return {{ $receiver }} + } + {{ else }} + {{ $p := lower (printf "%.1s" $e.Type.Name) }} + {{/* if the name of the parameter conflicts with the receiver name */}} + {{ if eq $p $receiver }} {{ $p = "v" }} {{ end }} + {{ $idsFunc := print "Remove" (singular $e.Name | pascal) "IDs" }} + // {{ $idsFunc }} removes the {{ $e.Name }} edge to {{ $e.Type.Name }} by ids. + func ({{ $receiver }} *{{ $builder }}) {{ $idsFunc }}(ids ...{{ $.ID.Type }}) *{{ $builder }} { + if {{ $receiver }}.removed{{ pascal $e.Name }} == nil { + {{ $receiver }}.removed{{ pascal $e.Name }} = make(map[{{ $.ID.Type }}]struct{}) + } + for i := range ids { + {{ $receiver }}.removed{{ pascal $e.Name }}[ids[i]] = struct{}{} + } + return {{ $receiver }} + } + {{ $func := print "Remove" (pascal $e.Name) }} + // {{ $func }} removes {{ $e.Name }} edges to {{ $e.Type.Name }}. + func ({{ $receiver }} *{{ $builder }}) {{ $func }}({{ $p }} ...*{{ $e.Type.Name }}) *{{ $builder }} { + ids := make([]{{ $.ID.Type }}, len({{ $p }})) + {{ $i := "i" }}{{ if eq $i $p }}{{ $i = "j" }}{{ end -}} + for {{ $i }} := range {{ $p }} { + ids[{{ $i }}] = {{ $p }}[{{ $i }}].ID + } + return {{ $receiver }}.{{ $idsFunc }}(ids...) + } + {{ end }} +{{ end }} +{{ end }} + +{{/* shared templates for validators. 
*/}} +{{ define "update/validators" }} +{{- $pkg := .Scope.Package -}} +{{- $zero := .Scope.ZeroValue }} +{{- $receiver := .Scope.Receiver -}} +{{- range $_, $f := $.Fields -}} + {{ with $f.Validators -}} + if {{ $receiver }}.{{ $f.StructField }} != nil { + if err := {{ $.Package }}.{{ $f.Validator }}(*{{ $receiver }}.{{ $f.StructField }}); err != nil { + return {{ $zero }}, fmt.Errorf("{{ $pkg }}: validator failed for field \"{{ $f.Name }}\": %v", err) + } + } + {{ end -}} +{{ end -}} +{{- range $_, $e := $.Edges }} + {{- if $e.Unique -}} + if len({{ $receiver }}.{{ $e.StructField }}) > 1 { + return {{ $zero }}, errors.New("{{ $pkg }}: multiple assignments on a unique edge \"{{ $e.Name }}\"") + } + {{ if not $e.Optional -}} + if {{ $receiver }}.cleared{{ pascal $e.Name }} && {{ $receiver }}.{{ $e.StructField }} == nil { + return {{ $zero }}, errors.New("{{ $pkg }}: clearing a unique edge \"{{ $e.Name }}\"") + } + {{ end -}} + {{ end -}} +{{ end -}} +{{ end }} diff --git a/entc/gen/template/client.tmpl b/entc/gen/template/client.tmpl new file mode 100644 index 000000000..4057d2629 --- /dev/null +++ b/entc/gen/template/client.tmpl @@ -0,0 +1,158 @@ +{{ define "client" }} + +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +import ( + "log" + + "{{ $.Config.Package }}/migrate" + {{ range $_, $n := $.Nodes }} + "{{ $n.Config.Package }}/{{ $n.Package }}" + {{- end }} +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + {{ range $_, $n := $.Nodes -}} + // {{ $n.Name }} is the client for interacting with the {{ $n.Name }} builders. + {{ $n.Name }} *{{ $n.Name }}Client + {{ end }} +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + c := config{log: log.Println} + c.options(opts...) 
+ return &Client{ + config: c, + Schema: migrate.NewSchema(c.driver), + {{ range $_, $n := $.Nodes -}} + {{ $n.Name }}: New{{ $n.Name }}Client(c), + {{ end -}} + } +} + +// Tx returns a new transactional client. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, fmt.Errorf("{{ $pkg }}: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("{{ $pkg }}: starting a transaction: %v", err) + } + cfg := config{driver: tx, log: c.log, verbose: c.verbose} + return &Tx{ + config: cfg, + {{ range $_, $n := $.Nodes -}} + {{ $n.Name }}: New{{ $n.Name }}Client(cfg), + {{ end -}} + }, nil +} + +{{ range $_, $n := $.Nodes -}} +{{ $client := print $n.Name "Client" }} +// {{ $client }} is a client for the {{ $n.Name }} schema. +type {{ $client }} struct { + config +} + +{{ $rec := $n.Receiver }}{{ if eq $rec "c" }}{{ $rec = printf "%.2s" $n.Name | lower }}{{ end }} + +// New{{ $client }} returns a client for the {{ $n.Name }} from the given config. +func New{{ $client }}(c config) *{{ $client }} { + return &{{ $client }}{config: c} +} + +// Create returns a create builder for {{ $n.Name }}. +func (c *{{ $client }}) Create() *{{ $n.Name }}Create { + return &{{ $n.Name }}Create{config: c.config} +} + +// Update returns an update builder for {{ $n.Name }}. +func (c *{{ $client }}) Update() *{{ $n.Name }}Update { + return &{{ $n.Name }}Update{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *{{ $client }}) UpdateOne({{ $rec }} *{{ $n.Name }}) *{{ $n.Name }}UpdateOne { + return c.UpdateOneID({{ $rec }}.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *{{ $client }}) UpdateOneID(id {{ $n.ID.Type }}) *{{ $n.Name }}UpdateOne { + return &{{ $n.Name }}UpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for {{ $n.Name }}. 
+func (c *{{ $client }}) Delete() *{{ $n.Name }}Delete {
+	return &{{ $n.Name }}Delete{config: c.config}
+}
+
+// DeleteOne returns a delete builder for the given entity.
+func (c *{{ $client }}) DeleteOne({{ $rec }} *{{ $n.Name }}) *{{ $n.Name }}DeleteOne {
+	return c.DeleteOneID({{ $rec }}.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *{{ $client }}) DeleteOneID(id {{ $n.ID.Type }}) *{{ $n.Name }}DeleteOne {
+	return &{{ $n.Name }}DeleteOne{c.Delete().Where({{ $n.Package }}.ID(id))}
+}
+
+// Query returns a query builder for {{ $n.Name }}.
+func (c *{{ $client }}) Query() *{{ $n.Name }}Query {
+	return &{{ $n.Name }}Query{config: c.config}
+}
+
+{{ range $_, $e := $n.Edges }}
+{{ $builder := print (pascal $e.Type.Name) "Query" }}
+// Query{{ pascal $e.Name }} queries the {{ $e.Name }} edge of a {{ $n.Name }}.
+func (c *{{ $client }}) Query{{ pascal $e.Name }}({{ $rec }} *{{ $n.Name }}) *{{ $builder }} {
+	query := &{{ $e.Type.Name }}Query{config: c.config}
+	switch c.driver.Dialect() {
+	case dialect.MySQL, dialect.SQLite:
+		id := {{ $rec }}.{{- if $n.ID.IsString }}id(){{ else }}ID{{ end }}
+		{{- if $e.M2M }}
+			{{ $i := 1 }}{{ $j := 0 }}{{- if $e.IsInverse }}{{ $i = 0 }}{{ $j = 1 }}{{ end -}}
+			t1 := sql.Table({{ $e.Type.Package }}.Table)
+			t2 := sql.Table({{ $n.Package }}.Table)
+			t3 := sql.Table({{ $n.Package }}.{{ $e.TableConstant }})
+			t4 := sql.Select(t3.C({{ $n.Package }}.{{ $e.PKConstant }}[{{ $i }}])).
+				From(t3).
+				Join(t2).
+				On(t3.C({{ $n.Package }}.{{ $e.PKConstant }}[{{ $j }}]), t2.C({{ $n.Package }}.{{ $n.ID.Constant }})).
+				Where(sql.EQ(t2.C({{ $n.Package }}.{{ $n.ID.Constant }}), id))
+			query.sql = sql.Select().
+				From(t1).
+				Join(t4).
+ On(t1.C({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}), t4.C({{ $n.Package }}.{{ $e.PKConstant }}[{{ $i }}])) + {{- else if or $e.M2O (and $e.O2O $e.IsInverse) }}{{/* M2O || (O2O with inverse edge) */}} + t1 := sql.Table({{ $e.Type.Package }}.Table) + t2 := sql.Select({{ $n.Package }}.{{ $e.ColumnConstant }}). + From(sql.Table({{ $n.Package }}.{{ $e.TableConstant }})). + Where(sql.EQ({{ $n.Package }}.{{ $n.ID.Constant }}, id)) + query.sql = sql.Select().From(t1).Join(t2).On(t1.C({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}), t2.C({{ $n.Package }}.{{ $e.ColumnConstant }})) + {{- else }}{{/* O2M || (O2O with assoc edge) */}} + query.sql = sql.Select().From(sql.Table({{ $e.Type.Package }}.Table)). + Where(sql.EQ({{ $n.Package }}.{{ $e.ColumnConstant }}, id)) + {{- end }} + case dialect.Neptune: + {{- if $e.SelfRef }} + query.gremlin = g.V({{ $rec }}.ID).Both({{ $n.Package }}.{{ $e.Constant }}) + {{- else if $e.IsInverse }} + query.gremlin = g.V({{ $rec }}.ID).InE({{ $e.Type.Package }}.{{ $e.Constant }}).OutV() + {{- else }} + query.gremlin = g.V({{ $rec }}.ID).OutE({{ $n.Package }}.{{ $e.Constant }}).InV() + {{- end }} + } + return query +} +{{ end }} + +{{ end }} +{{ end }} + diff --git a/entc/gen/template/config.tmpl b/entc/gen/template/config.tmpl new file mode 100644 index 000000000..37d6a27d1 --- /dev/null +++ b/entc/gen/template/config.tmpl @@ -0,0 +1,52 @@ +{{ define "config" }} + +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +// Option function to configure the client. +type Option func(*config) + +// Config is the configuration for the client and its builder. +type config struct { + // driver is the driver used for execute database requests. + driver dialect.Driver + // verbose enable a verbosity logging. + verbose bool + // log used for logging on verbose mode. + log func(...interface{}) +} + +// Options applies the options on the config object. 
+func (c *config) options(opts ...Option) {
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.verbose {
+		c.driver = dialect.Debug(c.driver, c.log)
+	}
+}
+
+// Verbose sets the client logging to verbose.
+func Verbose() Option {
+	return func(c *config) {
+		c.verbose = true
+	}
+}
+
+// Log sets the logging function used on verbose mode.
+func Log(fn func(...interface{})) Option {
+	return func(c *config) {
+		c.log = fn
+	}
+}
+
+// Driver configures the client driver.
+func Driver(driver dialect.Driver) Option {
+	return func(c *config) {
+		c.driver = driver
+	}
+}
+
+{{ end }}
\ No newline at end of file
diff --git a/entc/gen/template/dialect/gremlin/create.tmpl b/entc/gen/template/dialect/gremlin/create.tmpl
new file mode 100644
index 000000000..f45bf086d
--- /dev/null
+++ b/entc/gen/template/dialect/gremlin/create.tmpl
@@ -0,0 +1,73 @@
+{{ define "dialect/gremlin/create" }}
+{{ $builder := pascal $.Scope.Builder }}
+{{ $receiver := receiver $builder }}
+
+func ({{ $receiver }} *{{ $builder }}) gremlinSave(ctx context.Context) (*{{ $.Name }}, error) {
+	res := &gremlin.Response{}
+	query, bindings := {{ $receiver }}.gremlin().Query()
+	if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil {
+		return nil, err
+	}
+	if err, ok := isConstantError(res); ok {
+		return nil, err
+	}
+	{{ $.Receiver }} := &{{ $.Name }}{config: {{ $receiver }}.config}
+	if err := {{ $.Receiver }}.FromResponse(res); err != nil {
+		return nil, err
+	}
+	return {{ $.Receiver }}, nil
+}
+
+func ({{ $receiver }} *{{ $builder }}) gremlin() *dsl.Traversal {
+	{{- with .NumConstraint }}
+	type constraint struct {
+		pred *dsl.Traversal // constraint predicate.
+		test *dsl.Traversal // test matches and its constant.
+	}
+	constraints := make([]*constraint, 0, {{ .
}}) + {{- end }} + v := g.AddV({{ $.Package }}.Label) + {{- range $_, $f := $.Fields }} + if {{ $receiver }}.{{- $f.StructField }} != nil { + {{- if $f.Unique }} + constraints = append(constraints, &constraint{ + pred: g.V().Has({{ $.Package }}.Label, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField({{ $.Package }}.Label, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }})), + }) + {{- end }} + v.Property(dsl.Single, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}) + } + {{- end }} + {{- range $_, $e := $.Edges }} + {{- $direction := "In" }} + {{- $name := printf "%s.%s" $.Package $e.Constant }} + for id := range {{ $receiver }}.{{ $e.StructField }} { + {{- if $e.IsInverse }} + {{- $direction = "Out" }} + {{- $name = printf "%s.%s" $e.Type.Package $e.Constant }} + v.AddE({{ $name }}).From(g.V(id)).InV() + {{- else }} + v.AddE({{ $name }}).To(g.V(id)).OutV() + {{- end }} + {{- if $e.HasConstraint }} + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel({{ $name }}).{{ $direction }}V().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge({{ $.Package }}.Label, {{ $name }}, id)), + }) + {{- end }} + } + {{- end }} + {{- with .NumConstraint }} + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr + {{- else }} + return v.ValueMap(true) + {{- end }} +} +{{ end }} diff --git a/entc/gen/template/dialect/gremlin/delete.tmpl b/entc/gen/template/dialect/gremlin/delete.tmpl new file mode 100644 index 000000000..b2044821e --- /dev/null +++ b/entc/gen/template/dialect/gremlin/delete.tmpl @@ -0,0 +1,19 @@ +{{ define "dialect/gremlin/delete" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver}} 
*{{ $builder }}) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlin().Query() + return {{ $receiver }}.driver.Exec(ctx, query, bindings, res) +} + + +func ({{ $receiver }} *{{ $builder }}) gremlin() *dsl.Traversal { + t := g.V().HasLabel({{ $.Package }}.Label) + for _, p := range {{ $receiver }}.predicates { + p.Gremlin(t) + } + return t.Drop() +} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/gremlin/group.tmpl b/entc/gen/template/dialect/gremlin/group.tmpl new file mode 100644 index 000000000..f292b558a --- /dev/null +++ b/entc/gen/template/dialect/gremlin/group.tmpl @@ -0,0 +1,42 @@ +{{ define "dialect/gremlin/group" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver }} *{{ $builder }}) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlinQuery().Query() + if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len({{ $receiver }}.fields)+len({{ $receiver }}.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + + +func ({{ $receiver }} *{{ $builder }}) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range {{ $receiver }}.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range {{ $receiver }}.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return {{ $receiver }}.gremlin.Group(). + By(__.Values({{ $receiver }}.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). 
+ Next() +} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/gremlin/query.tmpl b/entc/gen/template/dialect/gremlin/query.tmpl new file mode 100644 index 000000000..295b301fe --- /dev/null +++ b/entc/gen/template/dialect/gremlin/query.tmpl @@ -0,0 +1,76 @@ +{{ define "dialect/gremlin/query" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver }} *{{ $builder }}) gremlinIDs(ctx context.Context) ([]{{ $.ID.Type }}, error) { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlinQuery().Query() + if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]{{ $.ID.Type }}, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.({{ $.ID.Type }})) + } + return ids, nil +} + +func ({{ $receiver }} *{{ $builder }}) gremlinAll(ctx context.Context) ([]*{{ $.Name }}, error) { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlinQuery().ValueMap(true).Query() + if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var {{ plural $.Receiver }} {{ plural $.Name }} + if err := {{ plural $.Receiver }}.FromResponse(res); err != nil { + return nil, err + } + {{ plural $.Receiver }}.config({{ $receiver }}.config) + return {{ plural $.Receiver }}, nil +} + +func ({{ $receiver }} *{{ $builder }}) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlinQuery().Count().Query() + if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func ({{ $receiver }} *{{ $builder }}) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlinQuery().HasNext().Query() + if err 
:= {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func ({{ $receiver }} *{{ $builder }}) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel({{ $.Package }}.Label) + if {{ $receiver }}.gremlin != nil { + v = {{ $receiver }}.gremlin.Clone() + } + for _, p := range {{ $receiver }}.predicates { + p.Gremlin(v) + } + if len({{ $receiver }}.order) > 0 { + v.Order() + for _, p := range {{ $receiver }}.order { + p.Gremlin(v) + } + } + if limit := {{ $receiver }}.limit; limit != nil { + v.Limit(*limit) + } + if unique := {{ $receiver }}.unique; len(unique) == 0 { + v.Dedup() + } + return v +} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/gremlin/update.tmpl b/entc/gen/template/dialect/gremlin/update.tmpl new file mode 100644 index 000000000..b7904fae5 --- /dev/null +++ b/entc/gen/template/dialect/gremlin/update.tmpl @@ -0,0 +1,129 @@ +{{ define "dialect/gremlin/update" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} +{{ $one := hasSuffix $builder "One" }} + +func ({{ $receiver }} *{{ $builder }}) gremlinSave(ctx context.Context) ({{- if not $one }}[]{{ end }}*{{ $.Name }}, error) { + res := &gremlin.Response{} + query, bindings := {{ $receiver }}.gremlin({{- if $one }}{{ $receiver }}.id{{ end }}).Query() + if err := {{ $receiver }}.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + {{- $r := plural $.Receiver }} + {{- if $one }} + {{- $r = $.Receiver }} + {{ $r }} := &{{ $.Name }}{config: {{ $receiver }}.config} + {{- else }} + var {{ $r }} {{ plural $.Name }} + {{ $r }}.config({{ $receiver }}.config) + {{- end }} + if err := {{ $r }}.FromResponse(res); err != nil { + return nil, err + } + return {{ $r }}, nil +} + +func ({{ $receiver }} *{{ $builder }}) gremlin({{ if $one }}id {{ $.ID.Type }}{{ end }}) *dsl.Traversal { + {{- with 
.NumConstraint }} + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. + } + constraints := make([]*constraint, 0, {{ . }}) + {{- end }} + {{- /* case of update specific vertex */}} + {{- if $one }} + v := g.V(id) + {{- /* general update for N vertices */}} + {{- else }} + v := g.V().HasLabel({{ $.Package }}.Label) + for _, p := range {{ $receiver }}.predicates { + p.Gremlin(v) + } + {{- end }} + var ( + {{ if or .NumConstraint (len $.Edges) }}rv = v.Clone(){{ end }} + trs []*dsl.Traversal + ) + {{- range $_, $f := $.Fields }} + if {{ $receiver }}.{{ $f.StructField }} != nil { + {{- if $f.Unique }} + constraints = append(constraints, &constraint{ + pred: g.V().Has({{ $.Package }}.Label, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField({{ $.Package }}.Label, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }})), + }) + {{- end }} + v.Property(dsl.Single, {{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}) + } + {{- end }} + {{- range $_, $e := $.Edges }} + {{- $direction := "In" }} + {{- $name := printf "%s.%s" $.Package $e.Constant }} + {{- if $e.IsInverse }} + {{- $direction = "Out" }} + {{- $name = printf "%s.%s" $e.Type.Package $e.Constant }} + {{- end }} + {{- /* remove edges */}} + {{- if $e.Unique }} + if {{ $receiver }}.cleared{{ pascal $e.Name }} { + {{- else }} + for id := range {{ $receiver }}.removed{{ pascal $e.Name }} { + {{- end }} + {{- if $e.SelfRef }} + tr := rv.Clone().BothE({{ $name }}){{ if not $e.Unique }}.Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))){{ end }}.Drop().Iterate() + {{- else if $e.IsInverse }} + tr := rv.Clone().InE({{ $name }}){{ if not $e.Unique }}.Where(__.OtherV().HasID(id)){{ end }}.Drop().Iterate() + {{- else }} + tr := rv.Clone().OutE({{ $name }}){{ if not $e.Unique }}.Where(__.OtherV().HasID(id)){{ end 
}}.Drop().Iterate() + {{- end }} + trs = append(trs, tr) + } + {{- /* update edges */}} + for id := range {{ $receiver }}.{{ $e.StructField }} { + {{- if $e.IsInverse }} + v.AddE({{ $name }}).From(g.V(id)).InV() + {{- else }} + v.AddE({{ $name }}).To(g.V(id)).OutV() + {{- end }} + {{- if $e.HasConstraint }} + {{- if $e.SelfRef }} + constraints = append(constraints, &constraint{ + pred: rv.Clone().Both({{ $name }}).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge({{ $.Package }}.Label, {{ $name }}, id)), + }) + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel({{ $name }}).Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge({{ $.Package }}.Label, {{ $name }}, id)), + }) + {{- else }} + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel({{ $name }}).{{ $direction }}V().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge({{ $.Package }}.Label, {{ $name }}, id)), + }) + {{- end }} + {{- end }} + } + {{- end }} + v.ValueMap(true) + {{- with .NumConstraint }} + if len(constraints) > 0 { + {{- /* make sure the traversal does not contain more than one vertex if we have constraint */}} + {{- if not $one }} + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + {{- end }} + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + {{- end }} + trs = append(trs, v) + return dsl.Join(trs...) 
+} +{{ end }} diff --git a/entc/gen/template/dialect/sql/create.tmpl b/entc/gen/template/dialect/sql/create.tmpl new file mode 100644 index 000000000..7ae302372 --- /dev/null +++ b/entc/gen/template/dialect/sql/create.tmpl @@ -0,0 +1,149 @@ +{{ define "dialect/sql/create" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver }} *{{ $builder }}) sqlSave(ctx context.Context) (*{{ $.Name }}, error) { + var ( + res sql.Result + {{ $.Receiver }} = &{{ $.Name }}{config: {{ $receiver }}.config} + ) + tx, err := {{ $receiver }}.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert({{ $.Package }}.Table).Default({{ $receiver }}.driver.Dialect()) + {{- range $_, $f := $.Fields }} + if {{ $receiver }}.{{- $f.StructField }} != nil { + builder.Set({{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}) + {{ if $f.Nullable }}*{{ end }}{{ $.Receiver }}.{{ pascal $f.Name }} = *{{ $receiver }}.{{ $f.StructField }} + } + {{- end }} + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + {{ $.Receiver }}.ID = {{ if $.ID.IsString }}strconv.FormatInt(id, 10){{ else }}id{{ end }} + {{- range $_, $e := $.Edges }} + if len({{ $receiver }}.{{ $e.StructField }}) > 0 { + {{- if and $e.Unique $e.SelfRef }}{{/* O2O with self reference */}} + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/create/convertid" $e -}} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.EQ({{ $.Package }}.{{ $.ID.Constant }}, id)).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + query, args = sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, id). 
+ Where(sql.EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("\"{{ $e.Name }}\" (%v) already connected to a different \"{{ $.Name }}\"", eid)}) + } + } + {{- else if $e.M2M }} + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/create/convertid" $e -}} + {{ $a := 0 }}{{ $b := 1 }}{{- if $e.IsInverse }}{{ $a = 1 }}{{ $b = 0 }}{{ end }} + query, args := sql.Insert({{ $.Package }}.{{ $e.TableConstant }}). + Columns({{ $.Package }}.{{ $e.PKConstant }}[{{ $a }}], {{ $.Package }}.{{ $e.PKConstant }}[{{ $b }}]). + Values(id, eid). + {{- if $e.SelfRef }}{{/* self-ref creates the edges in both ways. */}} + Values(eid, id). + {{- end }} + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + {{- else if $e.M2O }} + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/create/convertid" $e -}} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.EQ({{ $.Package }}.{{ $.ID.Constant }}, id)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + {{- else if $e.O2M }} + p := sql.P() + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/create/convertid" $e -}} + p.Or().EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid) + } + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, id). 
+ Where(sql.And(p, sql.IsNull({{ $.Package }}.{{ $e.ColumnConstant }}))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"{{ $e.Name }}\" %v already connected to a different \"{{ $.Name }}\"", keys({{ $receiver }}.{{ $e.StructField }}))}) + } + {{- else }}{{/* O2O */}} + {{- if $.Type.ID.IsString }} + eid, err := strconv.Atoi(keys({{ $receiver }}.{{ $e.StructField }})[0]) + if err != nil { + return nil, err + } + {{- else }} + eid := keys({{ $receiver }}.{{ $e.StructField }})[0] + {{- end }} + {{- if $e.IsInverse }} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.EQ({{ $.Package }}.{{ $.ID.Constant }}, id).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})). + Query() + {{- else }} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, id). + Where(sql.EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})). 
+ Query() + {{- end }} + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"{{ $e.Name }}\" %v already connected to a different \"{{ $.Name }}\"", keys({{ $receiver }}.{{ $e.StructField }}))}) + } + {{- end }} + } + {{- end }} + if err := tx.Commit(); err != nil { + return nil, err + } + return {{ $.Receiver }}, nil +} + +{{ end }} + +{{ define "dialect/sql/create/convertid" }} + {{- if $.Type.ID.IsString }} + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + {{- end }} +{{ end }} diff --git a/entc/gen/template/dialect/sql/delete.tmpl b/entc/gen/template/dialect/sql/delete.tmpl new file mode 100644 index 000000000..08bd48642 --- /dev/null +++ b/entc/gen/template/dialect/sql/delete.tmpl @@ -0,0 +1,15 @@ +{{ define "dialect/sql/delete" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver}} *{{ $builder }}) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table({{ $.Package }}.Table)) + for _, p := range {{ $receiver }}.predicates { + p.SQL(selector) + } + query, args := sql.Delete({{ $.Package }}.Table).FromSelect(selector).Query() + return {{ $receiver }}.driver.Exec(ctx, query, args, &res) +} + +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/sql/group.tmpl b/entc/gen/template/dialect/sql/group.tmpl new file mode 100644 index 000000000..3372acab0 --- /dev/null +++ b/entc/gen/template/dialect/sql/group.tmpl @@ -0,0 +1,25 @@ +{{ define "dialect/sql/group" }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver }} *{{ $builder }}) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + 
query, args := {{ $receiver }}.sqlQuery().Query() + if err := {{ $receiver }}.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + + +func ({{ $receiver }} *{{ $builder }}) sqlQuery() *sql.Selector { + selector := {{ $receiver }}.sql + columns := make([]string, 0, len({{ $receiver }}.fields) + len({{ $receiver}}.fns)) + columns = append(columns, {{ $receiver }}.fields...) + for _, fn := range {{ $receiver }}.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy({{ $receiver }}.fields...) +} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/sql/query.tmpl b/entc/gen/template/dialect/sql/query.tmpl new file mode 100644 index 000000000..cc0c02903 --- /dev/null +++ b/entc/gen/template/dialect/sql/query.tmpl @@ -0,0 +1,89 @@ +{{ define "dialect/sql/query" }} +{{ $pkg := $.Scope.Package }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} + +func ({{ $receiver }} *{{ $builder }}) sqlAll(ctx context.Context) ([]*{{ $.Name }}, error) { + rows := &sql.Rows{} + selector := {{ $receiver }}.sqlQuery() + if unique := {{ $receiver }}.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := {{ $receiver }}.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + {{- $ret := plural $.Receiver }} + var {{ $ret }} {{ plural $.Name }} + if err := {{ $ret }}.FromRows(rows); err != nil { + return nil, err + } + {{ $ret }}.config({{ $receiver }}.config) + return {{ $ret }}, nil +} + +func ({{ $receiver }} *{{ $builder }}) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := {{ $receiver }}.sqlQuery() + unique := []string{ {{ $.Package }}.{{ $.ID.Constant }} } + if len({{ $receiver }}.unique) > 0 { + unique = {{ $receiver }}.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, 
args := selector.Query() + if err := {{ $receiver }}.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("{{ $pkg }}: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("{{ $pkg }}: failed reading count: %v", err) + } + return n, nil +} + +func ({{ $receiver }} *{{ $builder }}) sqlExist(ctx context.Context) (bool, error) { + n, err := {{ $receiver }}.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("{{ $pkg }}: check existence: %v", err) + } + return n > 0, nil +} + +func ({{ $receiver }} *{{ $builder }}) sqlIDs(ctx context.Context) ([]{{ $.ID.Type }}, error) { + vs, err := {{ $receiver }}.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []{{ $.ID.Type }} + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + + +func ({{ $receiver }} *{{ $builder }}) sqlQuery() *sql.Selector { + t1 := sql.Table({{ $.Package }}.Table) + selector := sql.Select(t1.Columns({{ $.Package }}.Columns...)...).From(t1) + if {{ $receiver }}.sql != nil { + selector = {{ $receiver }}.sql + selector.Select(selector.Columns({{ $.Package }}.Columns...)...) 
+ } + for _, p := range {{ $receiver }}.predicates { + p.SQL(selector) + } + for _, p := range {{ $receiver }}.order { + p.SQL(selector) + } + if limit := {{ $receiver }}.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/dialect/sql/update.tmpl b/entc/gen/template/dialect/sql/update.tmpl new file mode 100644 index 000000000..e3e5f035b --- /dev/null +++ b/entc/gen/template/dialect/sql/update.tmpl @@ -0,0 +1,272 @@ +{{ define "dialect/sql/update" }} +{{ $pkg := $.Scope.Package }} +{{ $builder := pascal $.Scope.Builder }} +{{ $receiver := receiver $builder }} +{{ $one := hasSuffix $builder "One" }} +{{- $zero := 0 }}{{ if $one }}{{ $zero = "nil" }}{{ end }} +{{- $ret := "n" }}{{ if $one }}{{ $ret = $.Receiver }}{{ end }} + +func ({{ $receiver }} *{{ $builder }}) sqlSave(ctx context.Context) ({{ $ret }} {{ if $one }}*{{ $.Name }}{{ else }}int{{ end }}, err error) { + selector := sql.Select({{ $.Package }}.{{ if $one }}Columns...{{ else }}{{ $.ID.Constant }}{{ end }}).From(sql.Table({{ $.Package }}.Table)) + {{- if $one }} + {{ $.Package }}.ID({{ $receiver }}.id).SQL(selector) + {{- else }} + for _, p := range {{ $receiver }}.predicates { + p.SQL(selector) + } + {{- end }} + rows := &sql.Rows{} + query, args := selector.Query() + if err = {{ $receiver }}.driver.Query(ctx, query, args, rows); err != nil { + return {{ $zero }}, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + {{- if $one }} + {{ $.Receiver }} = &{{ $.Name }}{config: {{ $receiver }}.config} + if err := {{ $.Receiver }}.FromRows(rows); err != nil { + return {{ $zero }}, fmt.Errorf("{{ $pkg }}: failed scanning row into {{ $.Name }}: %v", err) + } + id = {{ $.Receiver }}.{{- if $.ID.IsString }}id(){{ else }}ID{{ end }} + {{- else }} + if err := rows.Scan(&id); err != nil { + return {{ $zero }}, fmt.Errorf("{{ $pkg }}: failed reading id: %v", err) + } + {{- end }} + ids = append(ids, id) 
+ } + {{- if $one }} + switch n := len(ids); { + case n == 0: + return {{ $zero }}, fmt.Errorf("{{ $pkg }}: {{ $.Name }} not found with id: %v", {{ $receiver }}.id) + case n > 1: + return {{ $zero }}, fmt.Errorf("{{ $pkg }}: more than one {{ $.Name }} with the same id: %v", {{ $receiver }}.id) + } + {{- else }} + if len(ids) == 0 { + return {{ $zero }}, nil + } + {{- end }} + {{/* if there's something to update, start a transaction. */}} + tx, err := {{ $receiver }}.driver.Tx(ctx) + if err != nil { + return {{ $zero }}, err + } + {{- if $.Fields }} + var ( + update bool + res sql.Result + builder = sql.Update({{ $.Package }}.Table).Where(sql.InInts({{ $.Package }}.{{ $.ID.Constant }}, ids...)) + ) + {{- range $_, $f := $.Fields }} + if {{ $receiver }}.{{ $f.StructField }} != nil { + update = true + builder.Set({{ $.Package }}.{{ $f.Constant }}, *{{ $receiver }}.{{ $f.StructField }}) + {{- if $one }} + {{ if $f.Nullable }}*{{ end }}{{ $.Receiver }}.{{ pascal $f.Name }} = *{{ $receiver }}.{{ $f.StructField }} + {{- end }} + } + {{- end }} + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + } + {{- else if $.Edges }}{{/* ent without fields, but with edges */}} + var res sql.Result + {{- end }} + {{- range $_, $e := $.Edges }} + {{- if $e.M2M }} + if len({{ $receiver }}.removed{{ pascal $e.Name }}) > 0 { + {{- $a := 0 }}{{ $b := 1 }}{{ if $e.IsInverse }}{{ $a = 1 }}{{ $b = 0 }}{{ end }} + eids := make([]int, len({{ $receiver }}.removed{{ pascal $e.Name }})) + for eid := range {{ $receiver }}.removed{{ pascal $e.Name }} { + {{- template "dialect/sql/update/convertid" $e -}} + eids = append(eids, eid) + } + query, args := sql.Delete({{ $.Package }}.{{ $e.TableConstant }}). + Where(sql.InInts({{ $.Package }}.{{ $e.PKConstant }}[{{ $a }}], ids...)). + Where(sql.InInts({{ $.Package }}.{{ $e.PKConstant }}[{{ $b }}], eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + {{- if $e.SelfRef }}{{/* M2M with self reference */}}{{/* TODO: use OR in the case above. */}} + query, args = sql.Delete({{ $.Package }}.{{ $e.TableConstant }}). + Where(sql.InInts({{ $.Package }}.{{ $e.PKConstant }}[{{ $b }}], ids...)). + Where(sql.InInts({{ $.Package }}.{{ $e.PKConstant }}[{{ $a }}], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + {{- end }} + } + {{- else if $e.O2M }} + if len({{ $receiver }}.removed{{ pascal $e.Name }}) > 0 { + eids := make([]int, len({{ $receiver }}.removed{{ pascal $e.Name }})) + for eid := range {{ $receiver }}.removed{{ pascal $e.Name }} { + {{- template "dialect/sql/update/convertid" $e -}} + eids = append(eids, eid) + } + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + SetNull({{ $.Package }}.{{ $e.ColumnConstant }}). + Where(sql.InInts({{ $.Package }}.{{ $e.ColumnConstant }}, ids...)). + Where(sql.InInts({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + } + {{- else }}{{/* O2O or M2O */}} + if {{ $receiver }}.cleared{{ pascal $e.Name }} { + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + SetNull({{ $.Package }}.{{ $e.ColumnConstant }}). + Where(sql.InInts({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + {{- if $e.SelfRef }}{{/* O2O with self reference */}} + query, args = sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + SetNull({{ $.Package }}.{{ $e.ColumnConstant }}). + Where(sql.InInts({{ $.Package }}.{{ $e.ColumnConstant }}, ids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + {{- end }} + } + {{- end }} + if len({{ $receiver }}.{{ $e.StructField }}) > 0 { + {{- if and $e.Unique $e.SelfRef }}{{/* O2O with self reference */}} + if n := len(ids); n > 1 { + return {{ $zero }}, rollback(tx, fmt.Errorf("{{ $pkg }}: can't link O2O edge \"{{ $e.Name }}\" to %d vertices (> 1)", n)) + } + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/update/convertid" $e -}} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.EQ({{ $.Package }}.{{ $.ID.Constant }}, ids[0])).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + query, args = sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, ids[0]). + Where(sql.EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return {{ $zero }}, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return {{ $zero }}, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("\"{{ $e.Name }}\" (%v) already connected to a different \"{{ $.Name }}\"", eid)}) + } + } + {{- else if $e.M2M }} + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/update/convertid" $e -}} + values = append(values, []int{id, eid}, {{- if $e.SelfRef }}[]int{eid, id}{{ end }}){{/* self-ref creates the edges in both ways. 
*/}} + } + } + {{- $a := 0 }}{{ $b := 1 }}{{ if $e.IsInverse }}{{ $a = 1 }}{{ $b = 0 }}{{ end }} + builder := sql.Insert({{ $.Package }}.{{ $e.TableConstant }}). + Columns({{ $.Package }}.{{ $e.PKConstant }}[{{ $a }}], {{ $.Package }}.{{ $e.PKConstant }}[{{ $b }}]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + {{- else if $e.M2O }} + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/update/convertid" $e -}} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.InInts({{ $.Package }}.{{ $.ID.Constant }}, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + } + {{- else if $e.O2M }} + for _, id := range ids { + p := sql.P() + for eid := range {{ $receiver }}.{{ $e.StructField }} { + {{- template "dialect/sql/update/convertid" $e -}} + p.Or().EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid) + } + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, id). + Where(sql.And(p, sql.IsNull({{ $.Package }}.{{ $e.ColumnConstant }}))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return {{ $zero }}, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return {{ $zero }}, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"{{ $e.Name }}\" %v already connected to a different \"{{ $.Name }}\"", keys({{ $receiver }}.{{ $e.StructField }}))}) + } + } + {{- else }}{{/* O2O */}} + for _, id := range ids { + {{- if $.Type.ID.IsString }} + eid, serr := strconv.Atoi(keys({{ $receiver }}.{{ $e.StructField }})[0]) + if serr != nil { + return {{ $zero }}, err + } + {{- else }} + eid := keys({{ $receiver }}.{{ $e.StructField }})[0] + {{- end }} + {{- if $e.IsInverse }} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, eid). + Where(sql.EQ({{ $.Package }}.{{ $.ID.Constant }}, id).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})). + Query() + {{- else }} + query, args := sql.Update({{ $.Package }}.{{ $e.TableConstant }}). + Set({{ $.Package }}.{{ $e.ColumnConstant }}, id). + Where(sql.EQ({{ $e.Type.Package }}.{{ $e.Type.ID.Constant }}, eid).And().IsNull({{ $.Package }}.{{ $e.ColumnConstant }})). 
+ Query() + {{- end }} + if err := tx.Exec(ctx, query, args, &res); err != nil { + return {{ $zero }}, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return {{ $zero }}, rollback(tx, err) + } + if int(affected) < len({{ $receiver }}.{{ $e.StructField }}) { + return {{ $zero }}, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"{{ $e.Name }}\" %v already connected to a different \"{{ $.Name }}\"", keys({{ $receiver }}.{{ $e.StructField }}))}) + } + } + {{- end }} + } + {{- end }} + if err = tx.Commit(); err != nil { + return {{ $zero }}, err + } + return {{ if $one }}{{ $.Receiver }}{{ else }}len(ids){{ end }}, nil +} + +{{ end }} + +{{ define "dialect/sql/update/convertid" }} + {{- if $.Type.ID.IsString }} + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return {{/* return is not knwon at this point. */}} + } + {{- end }} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/ent.tmpl b/entc/gen/template/ent.tmpl new file mode 100644 index 000000000..25e9b442e --- /dev/null +++ b/entc/gen/template/ent.tmpl @@ -0,0 +1,192 @@ +{{ define "model" }} + +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +{{ template "import" $ }} + +// {{ $.Name }} is the model entity for the {{ $.Name }} schema. +type {{ $.Name }} struct { + config + // ID of the ent. + ID {{ $.ID.Type }} `json:"id,omitempty"` + {{ range $_, $f := $.Fields -}} + // {{ pascal $f.Name }} holds the value of the "{{ $f.Name }}" field. + {{ pascal $f.Name }} {{ if $f.Nullable }}*{{ end }}{{ $f.Type }} `{{ $f.StructTag }}` + {{ end -}} + {{ range $_, $e := $.Edges -}} + {{/* ignore generating edge fields */}} + {{- with $e.StructTag -}} + // {{ pascal $e.Name }} holds the value of the {{ $e.Name }} edge. The value set to nil, and should be updated manually. 
+ {{ pascal $e.Name }} {{ if not $e.Unique }}[]{{ end }}*{{ $e.Type.Name }} `{{ $e.StructTag }}` + {{ end -}} + {{ end -}} +} + +{{ $receiver := $.Receiver }} + +// FromResponse scans the gremlin response data into {{ $.Name }}. +func ({{ $receiver }} *{{ $.Name }}) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + {{- $scan := print "v" $receiver }} + var {{ $scan }} struct { + ID {{ $.ID.Type }} `json:"id,omitempty"` + {{ range $_, $f := $.Fields }} + {{- pascal $f.Name }} {{ if $f.IsTime }}int64{{ else }}{{ if $f.Nullable }}*{{ end }}{{ $f.Type }}{{ end }} `json:"{{ $f.Name }},omitempty"` + {{ end }} + } + if err := vmap.Decode(&{{ $scan }}); err != nil { + return err + } + {{ $receiver }}.ID = {{ $scan }}.ID + {{ range $_, $f := $.Fields }} + {{- $receiver }}.{{ pascal $f.Name }} = {{- if $f.IsTime }}time.Unix({{ $scan }}.{{- pascal $f.Name }}, 0) {{ else }}{{- $scan }}.{{- pascal $f.Name }}{{ end }} + {{ end -}} + return nil +} + +// FromRows scans the sql response data into {{ $.Name }}. +func ({{ $receiver }} *{{ $.Name }}) FromRows(rows *sql.Rows) error { + {{- $scan = print "v" $receiver }} + var {{ $scan }} struct { + ID {{ if $.ID.IsString }}int{{ else }}{{ $.ID.Type }}{{ end }} + {{ range $_, $f := $.Fields }} + {{- pascal $f.Name }} {{ if or $f.Nullable $f.Optional }}{{ $f.NullType }}{{ else }}{{ $f.Type }}{{ end }} + {{ end }} + } + // the order here should be the same as in the `{{ $.Package }}.Columns`. 
+ if err := rows.Scan( + &{{ $scan }}.ID, + {{- range $_, $f := $.Fields }} + &{{ $scan }}.{{- pascal $f.Name }}, + {{- end }} + ); err != nil { + return err + } + {{ $receiver }}.ID = {{ if $.ID.IsString }}strconv.Itoa({{ $scan }}.ID){{ else }}{{ $scan }}.ID{{ end }} + {{- range $_, $f := $.Fields }} + {{- if $f.Nullable }} + {{- if $f.IsTime }} + {{ $receiver }}.{{ pascal $f.Name }} = &{{ $scan }}.{{ pascal $f.Name }} + {{- else }} + if {{ $scan }}.{{- pascal $f.Name }}.Valid { + {{ $receiver }}.{{ pascal $f.Name }} = &{{ printf "%s.%s" $scan (pascal $f.Name) | $f.NullTypeField }} + } + {{- end }} + {{- else if $f.Optional }} + {{ $receiver }}.{{ pascal $f.Name }} = {{ printf "%s.%s" $scan (pascal $f.Name) | $f.NullTypeField }} + {{- else }} + {{ $receiver }}.{{ pascal $f.Name }} = {{ $scan }}.{{ pascal $f.Name }} + {{- end }} + {{- end }} + return nil +} + +{{ range $_, $e := $.Edges }} + {{ $func := print "Query" (pascal $e.Name) }} + // {{ $func }} queries the {{ $e.Name }} edge of the {{ $.Name }}. + func ({{ $receiver }} *{{ $.Name }}) {{ $func }}() *{{ $e.Type.Name}}Query { + return (&{{ $.Name }}Client{ {{ $receiver }}.config}).{{ $func }}({{ $receiver }}) + } +{{ end }} + +// Update returns a builder for updating this {{ $.Name }}. +// Note that, you need to call {{ $.Name }}.Unwrap() before calling this method, if this {{ $.Name }} +// was returned from a transaction, and the transaction was committed or rolled back. +func ({{ $receiver }} *{{ $.Name }}) Update() *{{ $.Name }}UpdateOne { + return (&{{ $.Name }}Client{ {{ $receiver }}.config}).UpdateOne({{ $receiver }}) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. 
+func ({{ $receiver }} *{{ $.Name }}) Unwrap() *{{ $.Name }} { + tx, ok := {{ $receiver }}.config.driver.(*txDriver) + if !ok { + panic("{{ $pkg }}: {{ $.Name }} is not a transactional entity") + } + {{ $receiver }}.config.driver = tx.drv + return {{ $receiver }} +} + +// String implements the fmt.Stringer. +func ({{ $receiver }} *{{ $.Name }}) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("{{ $.Name }}(") + buf.WriteString(fmt.Sprintf("id=%v,", {{ $receiver }}.ID)) + {{- range $i, $f := $.Fields }} + {{- if gt $i 0 }} + buf.WriteString(", ") + {{- end }} + {{- if $f.Nullable }} + if v := {{ $receiver }}.{{ pascal $f.Name }}; v != nil { + buf.WriteString(fmt.Sprintf("{{ $f.Name }}=%v", *v)) + } + {{- else }} + buf.WriteString(fmt.Sprintf("{{ $f.Name }}=%v", {{ $receiver }}.{{ pascal $f.Name }})) + {{- end }} + {{- end }} + buf.WriteString(")") + return buf.String() +} + +{{- if $.ID.IsString }} +// id returns the int representation of the ID field. +func ({{ $receiver }} *{{ $.Name }}) id() int { + id, _ := strconv.Atoi({{ $receiver }}.ID) + return id +} +{{- end }} + +{{ $slice := plural $.Name }} +// {{ $slice }} is a parsable slice of {{ $.Name }}. +type {{ $slice }} []*{{ $.Name }} + +// FromResponse scans the gremlin response data into {{ $slice }}. 
+func ({{ $receiver }} *{{ $slice }}) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + {{- $scan = print "v" $receiver }} + var {{ $scan }} []struct { + ID {{ $.ID.Type }} `json:"id,omitempty"` + {{ range $_, $f := $.Fields }} + {{- pascal $f.Name }} {{ if $f.IsTime }}int64{{ else }}{{ if $f.Nullable }}*{{ end }}{{ $f.Type }}{{ end }} `json:"{{ $f.Name }},omitempty"` + {{ end }} + } + if err := vmap.Decode(&{{ $scan }}); err != nil { + return err + } + for _, v := range {{ $scan }} { + *{{ $receiver }} = append(*{{ $receiver }}, &{{ $.Name }}{ + ID: v.ID, + {{ range $_, $f := $.Fields }} + {{- pascal $f.Name }}: {{- if $f.IsTime }}time.Unix(v.{{ pascal $f.Name }}, 0) {{ else }}v.{{ pascal $f.Name }}{{ end }}, + {{ end -}} + }) + } + return nil +} + +// FromRows scans the sql response data into {{ $slice }}. +func ({{ $receiver }} *{{ $slice }}) FromRows(rows *sql.Rows) error { + for rows.Next() { + {{- $scan = print "v" $receiver }} + {{ $scan }} := &{{ $.Name }}{} + if err := {{ $scan }}.FromRows(rows); err != nil { + return err + } + *{{ $receiver }} = append(*{{ $receiver }}, {{ $scan }}) + } + return nil +} + +func ({{ $receiver }} {{ $slice }}) config(cfg config) { + for i := range {{ $receiver }} { + {{ $receiver }}[i].config = cfg + } +} +{{ end }} diff --git a/entc/gen/template/example.tmpl b/entc/gen/template/example.tmpl new file mode 100644 index 000000000..3cb543e21 --- /dev/null +++ b/entc/gen/template/example.tmpl @@ -0,0 +1,91 @@ +{{ define "example" }} +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +import ( + "log" + "testing" + "os" + "net/url" + + {{ range $_, $n := $.Nodes }} + "{{ $n.Config.Package }}/{{ $n.Package }}" + {{- end }} + + "fbc/ent/dialect" + "fbc/lib/go/gremlin" +) + +{{ $env := upper $pkg | printf "%s_INTEGRATION_ENDPOINT" }} + +// endpoint for the database. 
In order to run the tests locally, run the following command: +// +// {{ $env }}="http://localhost:8182" go test -v +// +var endpoint *gremlin.Endpoint + +func init() { + if e, ok := os.LookupEnv("{{ $env }}"); ok { + if u, err := url.Parse(e); err == nil { + endpoint = &gremlin.Endpoint{u} + } + } +} + +{{ range $_, $n := $.Nodes -}} +func Example{{ pascal $n.Name }}() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the {{ lower $n.Name }}'s edges. + {{ range $i, $e := $n.Edges }} + {{- if not $e.IsInverse }} + {{- $v := printf "%s%d" $e.Type.Receiver $i }} + {{- $v }} := client.{{ $e.Type.Name }}. + Create(). + {{ range $_, $f := $e.Type.Fields }} + {{- pascal $f.Name | printf "Set%s" }}({{ $f.ExampleCode }}). + {{ end }} + SaveX(ctx) + log.Println("{{ lower $e.Type.Name }} created:", {{ $v }}) + {{ end }} + {{- end }} + // create {{ lower $n.Name }} vertex with its edges. + {{ $n.Receiver }} := client.{{ $n.Name }}. + Create(). + {{ range $_, $f := $n.Fields }} + {{- pascal $f.Name | printf "Set%s" }}({{ $f.ExampleCode }}). + {{ end }} + {{ range $i, $e := $n.Edges }} + {{- if not $e.IsInverse }} + {{- $op := "add" }}{{ if $e.Unique }}{{ $op = "set" }}{{ end }} + {{- $func := print (pascal $op) (pascal $e.Name) }} + {{- $func }}({{ printf "%s%d" $e.Type.Receiver $i }}). + {{ end }} + {{ end }} + SaveX(ctx) + log.Println("{{ lower $n.Name }} created:", {{ $n.Receiver }}) + + // query edges. 
+ {{ range $i, $e := $n.Edges }} + {{- if not $e.IsInverse }} + {{- $v := printf "%s%d" $e.Type.Receiver $i }} + {{- $v }}, err = {{ $n.Receiver }}.{{ pascal $e.Name | printf "Query%s" }}().First(ctx) + if err != nil { + log.Fatalf("failed querying {{ $e.Name }}: %v", err) + } + log.Println("{{ $e.Name }} found:", {{ $v }}) + {{ end }} + {{ end }} + // Output: +} +{{ end }} + +{{ end }} diff --git a/entc/gen/template/header.tmpl b/entc/gen/template/header.tmpl new file mode 100644 index 000000000..bf01d6ad0 --- /dev/null +++ b/entc/gen/template/header.tmpl @@ -0,0 +1,5 @@ +{{ define "header" }} +// Code generated (@generated) by entc, DO NOT EDIT. + +package {{ $ }} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/import.tmpl b/entc/gen/template/import.tmpl new file mode 100644 index 000000000..162bd1612 --- /dev/null +++ b/entc/gen/template/import.tmpl @@ -0,0 +1,27 @@ +{{ define "import" }} +import ( + "fmt" + "context" + "errors" + "strconv" + "strings" + "time" + + {{/* ignore generting on graph templates */}} + {{ if not (eq $.Config.Package $.Package) }} + "{{ $.Config.Package }}/{{ $.Package }}" + {{ with $.Config.Schema }} "{{ . }}" {{ end }} + {{ end }} + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" + "fbc/lib/go/gremlin/encoding/graphson" +) +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/meta.tmpl b/entc/gen/template/meta.tmpl new file mode 100644 index 000000000..9a2693a1b --- /dev/null +++ b/entc/gen/template/meta.tmpl @@ -0,0 +1,100 @@ +{{ define "meta" }} + +{{ template "header" $.Package }} + +{{ template "import" $ }} + +const ( + // Label holds the string label denoting the {{ lower $.Name }} type in the database. 
+ Label = "{{ $.Label }}" + {{ range $_, $e := $.Edges }}{{ $label := $e.Constant -}} + {{ if $e.IsInverse }}{{- $label = $e.InverseConstant -}} + // {{ $label }} holds the string label denoting the {{ lower $e.Name }} inverse edge type in the database. + {{ else -}} + // {{ $label }} holds the string label denoting the {{ lower $e.Name }} edge type in the database. + {{ end -}} + {{ $label }} = "{{ $e.Label }}" + {{ end -}} + {{ range $_, $f := $.Fields -}}{{ $field := $f.Constant -}} + // {{ $field }} holds the string denoting the {{ lower $f.Name }} vertex property in the database. + {{ $field }} = "{{ snake $f.Name }}" + {{ if $f.HasDefault }} + {{- $default := $f.DefaultConstant -}} + // {{ $default }} holds the default value for the {{ $f.Name }} field. + {{ $default }} {{ if $f.Type.Numeric }} {{ $f.Type }} {{ end }} = {{ printf "%#v" $f.Default }} + {{ end -}} + {{ end -}} + // {{ $.ID.Constant }} holds the string denoting the id field in the database. + {{ $.ID.Constant }} = "{{ snake $.ID.Name }}" + // Table holds the table name of the {{ lower $.Name }} in the database. + Table = "{{ $.Table }}" + {{- range $_, $e := $.Edges }} + // {{ $e.TableConstant }} is the table the holds the {{ $e.Name }} relation/edge. + {{- if $e.M2M }} The primary key declared below.{{ end }} + {{ $e.TableConstant }} = "{{ $e.Rel.Table }}" + {{- if eq $.Table $e.Type.Table | not }} + // {{ $e.InverseTableConstant }} is the table name for the {{ $e.Type.Name }} entity. + // It exists in this package in order to avoid circular dependency with the "{{ $e.Type.Package }}" package. + {{ $e.InverseTableConstant }} = "{{ $e.Type.Table }}" + {{- end }} + {{- if not $e.M2M }} + // {{ $e.ColumnConstant }} is the table column denoting the {{ $e.Name }} relation/edge. + {{ $e.ColumnConstant }} = "{{ $e.Rel.Column }}" + {{- end }} + {{- end }} +) + +// Columns holds all SQL columns are {{ lower $.Name }} fields. 
+var Columns = []string{ + {{ $.ID.Constant }}, + {{- range $_, $f := $.Fields }} + {{ $f.Constant }}, + {{- end }} +} + +{{ with $.NumM2M }} + var ( + {{- range $_, $e := $.Edges }} + {{- if $e.M2M }} + // {{ $e.PKConstant }} and {{ $e.ColumnConstant }}2 are the table columns denoting the + // primary key for the {{ $e.Name }} relation (M2M). + {{ $e.PKConstant }} = []string{"{{ index $e.Rel.Columns 0 }}", "{{ index $e.Rel.Columns 1 }}"} + {{- end }} + {{- end }} + ) +{{ end }} + +{{ if $.HasValidators }} +var ( + fields = {{ base $.Schema }}.{{ $.Name }}{}.Fields() + {{ range $i, $f := $.Fields -}} + {{ with $f.Validators -}} + {{ $name := $f.Validator -}} + {{ $type := printf "func (%s) error" $f.Type -}} + // {{ $name }} is a validator for the "{{ $f.Name }}" field. It is called by the builders before save. + {{ if eq $f.Validators 1 -}} + {{ $name }} = fields[{{ $i }}].Validators()[0].({{ $type }}) + {{ else -}} + {{ $name }} = func() {{ $type }} { + validators := fields[{{ $i }}].Validators() + fns := [...]func({{ $f.Type }}) error { + {{- range $j, $n := xrange $f.Validators }} + validators[{{ $j }}].(func({{ $f.Type }}) error), + {{- end }} + } + return func({{ $f.Name }} {{ $f.Type }}) error { + for _, fn := range fns { + if err := fn({{ $f.Name }}); err != nil { + return err + } + } + return nil + } + }() + {{ end -}} + {{ end -}} + {{ end -}} +) +{{ end }} + +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/migrate/migrate.tmpl b/entc/gen/template/migrate/migrate.tmpl new file mode 100644 index 000000000..2e8030eb0 --- /dev/null +++ b/entc/gen/template/migrate/migrate.tmpl @@ -0,0 +1,84 @@ +{{ define "migrate" }} +{{ template "header" "migrate" }} + +import ( + "fbc/ent/field" + "fbc/ent/dialect/sql/schema" +) + +var ( + nullable = true + {{- range $_, $t := $.Tables }} + {{- $columns := pascal $t.Name | printf "%sColumns" }} + // {{ $columns }} holds the columns for the "{{ $t.Name }}" table. 
+ {{ $columns }} = []*schema.Column{ + {{- range $_, $c := $t.Columns }} + { Name: "{{ $c.Name }}", Type: field.{{ $c.Type.ConstName }}, + {{- if $c.Unique }} Unique: true,{{ end }} + {{- if $c.Increment }} Increment: true,{{ end }} + {{- if $c.Nullable }} Nullable: &nullable,{{ end }} + {{- with $c.Size }} Size: {{ . }},{{ end }} + {{- with $c.Attr }} Attr: "{{ . }}",{{ end }} + {{- with $c.Default }} Default: "{{ . }}",{{ end }} }, + {{- end }} + } + {{- $table := pascal $t.Name | printf "%sTable" }} + // {{ $table }} holds the schema information for the "{{ $t.Name }}" table. + {{ $table }} = &schema.Table{ + Name: "{{ $t.Name }}", + Columns: {{ $columns }}, + PrimaryKey: []*schema.Column{ + {{- range $_, $pk := $t.PrimaryKey }} + {{- range $i, $c := $t.Columns }} + {{- if eq $pk.Name $c.Name }}{{ $columns }}[{{ $i }}],{{ end }} + {{- end }} + {{- end }} + }, + ForeignKeys: []*schema.ForeignKey{ + {{- range $_, $fk := $t.ForeignKeys }} + { + Symbol: "{{ $fk.Symbol }}", + Columns: []*schema.Column{ + {{- range $_, $c1 := $fk.Columns }} + {{- range $i, $c2 := $t.Columns }} + {{- if eq $c1.Name $c2.Name }}{{ $columns }}[{{ $i }}],{{ end }} + {{- end }} + {{- end }} + }, + {{/* postpone refrencing to avoid typechecking loops */}} + RefColumns: []*schema.Column{ + {{- range $_, $c1 := $fk.RefColumns }} + {{- range $i, $c2 := $fk.RefTable.Columns }} + {{- if eq $c1.Name $c2.Name }}{{ pascal $fk.RefTable.Name | printf "%sColumns" }}[{{ $i }}],{{ end }} + {{- end }} + {{- end }} + }, + {{- with $fk.OnUpdate.ConstName }} + OnUpdate: schema.{{ . }}, + {{- end }} + {{- with $fk.OnDelete.ConstName }} + OnDelete: schema.{{ . }}, + {{- end }} + }, + {{- end }} + }, + } + {{- end }} + // Tables holds all the tables in the schema. 
+ Tables = []*schema.Table{ + {{- range $_, $t := $.Tables }} + {{ pascal $t.Name | printf "%sTable" }}, + {{- end }} + } +) + +func init() { + {{- range $_, $t := $.Tables }} + {{- $table := pascal $t.Name | printf "%sTable" }} + {{- range $i, $fk := $t.ForeignKeys }} + {{ $table }}.ForeignKeys[{{ $i }}].RefTable = {{ pascal $fk.RefTable.Name | printf "%sTable" }} + {{- end }} + {{- end }} +} + +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/migrate/schema.tmpl b/entc/gen/template/migrate/schema.tmpl new file mode 100644 index 000000000..bcf167d69 --- /dev/null +++ b/entc/gen/template/migrate/schema.tmpl @@ -0,0 +1,43 @@ +{{ define "schema" }} +{{ template "header" "migrate" }} +{{ $pkg := base $.Config.Package }} + + +import ( + "context" + "fmt" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql/schema" +) + +// SQLDialect wraps the dialect.Driver with additional migration methods. +type SQLDriver interface { + Create(context.Context, ...*schema.Table) error +} + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv SQLDriver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { + s := &Schema{} + switch drv.Dialect() { + case dialect.MySQL: + s.drv = &schema.MySQL{Driver: drv} + case dialect.SQLite: + s.drv = &schema.SQLite{Driver: drv} + } + return s +} + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context) error { + if s.drv == nil { + return fmt.Errorf("{{ $pkg }}/migrate: dialect does not support migration") + } + return s.drv.Create(ctx, Tables...) 
+} +{{ end }} \ No newline at end of file diff --git a/entc/gen/template/tx.tmpl b/entc/gen/template/tx.tmpl new file mode 100644 index 000000000..f7a500328 --- /dev/null +++ b/entc/gen/template/tx.tmpl @@ -0,0 +1,110 @@ +{{ define "tx" }} + +{{ $pkg := base $.Config.Package }} +{{ template "header" $pkg }} + +import ( + "context" + "sync" + + "fbc/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + {{ range $_, $n := $.Nodes -}} + // {{ $n.Name }} is the client for interacting with the {{ $n.Name }} builders. + {{ $n.Name }} *{{ $n.Name }}Client + {{ end }} +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + return tx.config.driver.(*txDriver).tx.Commit() +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + return tx.config.driver.(*txDriver).tx.Rollback() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + return &Client{ + config: tx.config, + Schema: migrate.NewSchema(tx.driver), + {{ range $_, $n := $.Nodes -}} + {{ $n.Name }}: New{{ $n.Name }}Client(tx.config), + {{ end -}} + } +} + +{{/* first node for doc example */}} +{{- $first := index $.Nodes 0 }} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: {{ $first.Name }}.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that this driver is safe for concurrent usage, however, it executes only one query +// at the time. 
+type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // protects the tx below from concurrent execution. + mu sync.Mutex + // tx is the underlying transaction. + tx dialect.Tx +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
+func (tx *txDriver) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) + +{{ end }} diff --git a/entc/gen/template/where.tmpl b/entc/gen/template/where.tmpl new file mode 100644 index 000000000..bfaf0c6a6 --- /dev/null +++ b/entc/gen/template/where.tmpl @@ -0,0 +1,224 @@ +{{ define "where" }} + +{{ template "header" $.Package }} + +{{ template "import" $ }} + +// ID filters vertices based on their identifier. +func ID(id {{ $.ID.Type }}) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + {{- if $.ID.IsString }}id, _ := strconv.Atoi(id){{- end }} + s.Where(sql.EQ(s.C({{ $.ID.Constant }}), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +{{ range $_, $op := ops $.ID }} + {{ $r := "id" }}{{ if $op.Variadic }}{{ $r = "ids" }}{{ end }} + {{ $func := printf "ID%s" $op.Name }} + // {{ $func }} applies the {{ $op.Name }} predicate on the ID field. + func {{ $func }}({{ $r }} {{ if $op.Variadic }}...{{ end }}{{ $.ID.Type }}) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + {{- if $op.Variadic }} + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len({{ $r }}) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len({{ $r }})) + for i := range v { + {{ if $.ID.IsString }}v[i], _ = strconv.Atoi({{ $r }}[i]){{ else }}v[i] = {{ $r }}[i]{{ end }} + } + {{- else if $.ID.IsString }} + v, _ := strconv.Atoi({{ $r }}) + {{- end }} + s.Where(sql.{{ $op.Name }}(s.C({{ $.ID.Constant }}), v{{ if $op.Variadic }}...{{ end }})) + }, + Gremlin: func(t *dsl.Traversal) { + {{- if $op.Variadic }} + v := make([]interface{}, len({{ $r }})) + for i := range v { + v[i] = {{ $r }}[i] + } + {{- end }} + t.HasID(p.{{ $op.Gremlin }}({{ if $op.Variadic }}v...{{ else }}{{ $r }}{{ end }})) + }, + } + } +{{ end }} + +{{ range $_, $f := $.Fields }} + {{ $func := pascal $f.Name }} + // {{ $func }} applies equality check predicate on the {{ quote $f.Name }} field. It's identical to {{ $func }}EQ. + func {{ $func }}(v {{ $f.Type }}) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C({{ $f.Constant }}), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, {{ $f.Constant }}, p.EQ(v)) + }, + } + } +{{ end }} + +{{ range $_, $f := $.Fields }} + {{ range $_, $op := (ops $f) }} + {{ $r := "v" }}{{ if $op.Variadic }}{{ $r = "vs" }}{{ end }} + {{ $func := print (pascal $f.Name) ($op.Name) }} + // {{ $func }} applies the {{ $op.Name }} predicate on the {{ quote $f.Name }} field. + func {{ $func }}({{ $r }} {{ if $op.Variadic }}...{{ end }}{{ $f.Type }}) ent.Predicate { + {{- if $op.Variadic }} + v := make([]interface{}, len({{ $r }})) + for i := range v { + v[i] = {{ $r }}[i] + } + {{- end }} + return ent.Predicate{ + SQL: func(s *sql.Selector) { + {{- if $op.Variadic }} + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len({{ $r }}) == 0 { + s.Where(sql.False()) + return + } + {{- end }} + s.Where(sql.{{ $op.Name }}(s.C({{ $f.Constant }}), v{{ if $op.Variadic }}...{{ end }})) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, {{ $f.Constant }}, p.{{ $op.Gremlin }}(v{{ if $op.Variadic }}...{{ end }})) + }, + } + } + {{ end }} +{{ end }} + +{{ range $_, $e := $.Edges }} + {{ $func := pascal $e.Name | printf "Has%s" }} + {{ $label := $e.Constant }} + {{ $direction := "Out" }} + {{ $inverse_direction := "In" }} + {{ if $e.IsInverse }} + {{ $direction = "In" }} + {{ $inverse_direction = "Out" }} + {{/* avoid circular dependencies */}} + {{ $label = $e.InverseConstant }} + {{ end }} + // {{ $func }} applies the HasEdge predicate on the {{ quote $e.Name }} edge. + func {{ $func }}() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + {{- if $e.M2M }} + t1 := s.Table() + s.Where( + sql.In( + t1.C({{ $.ID.Constant }}), + sql.Select({{ $e.PKConstant }}[{{ if $e.IsInverse }}1{{ else }}0{{ end }}]).From(sql.Table({{ $e.TableConstant }})), + ), + ) + {{- else if or $e.M2O (and $e.O2O $e.IsInverse) }}{{/* M2O || (O2O with inverse edge) */}} + t1 := s.Table() + s.Where(sql.NotNull(t1.C({{ $e.ColumnConstant }}))) + {{- else }}{{/* O2M || (O2O with assoc edge) */}} + t1 := s.Table() + s.Where( + sql.In( + t1.C({{ $.ID.Constant }}), + sql.Select({{ $e.ColumnConstant }}). + From(sql.Table({{ $e.TableConstant }})). + Where(sql.NotNull({{ $e.ColumnConstant }})), + ), + ) + {{- end }} + }, + Gremlin: func(t *dsl.Traversal) { + {{- /* if it's an edge with self-reference, take the two vertices */}} + {{- if $e.SelfRef }} + t.Both({{ $label }}) + {{- else }} + t.{{ $direction }}E({{ $label }}).{{ $direction }}V() + {{- end }} + }, + } + } + {{ $func = printf "%sWith" $func }} + // {{ $func }} applies the HasEdge predicate on the {{ quote $e.Name }} edge with the given conditions (other predicates). 
+ func {{ $func }}(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + {{- if $e.M2M }} + {{ $i := 1 }}{{ $j := 0 }}{{- if $e.IsInverse }}{{ $i = 0 }}{{ $j = 1 }}{{ end -}} + t1 := s.Table() + t2 := sql.Table( + {{- if ne $.Table $e.Type.Table -}} + {{ $e.InverseTableConstant }} + {{- else -}} + Table + {{- end -}} + ) + t3 := sql.Table({{ $e.TableConstant }}) + t4 := sql.Select(t3.C({{ $e.PKConstant }}[{{ $j }}])). + From(t3). + Join(t2). + On(t3.C({{ $e.PKConstant }}[{{ $i }}]), t2.C({{ $e.Type.ID.Constant }})) + t5 := sql.Select().From(t2) + for _, p := range preds { + p.SQL(t5) + } + t4.FromSelect(t5) + s.Where(sql.In(t1.C({{ $.ID.Constant }}), t4)) + {{- else if or $e.M2O (and $e.O2O $e.IsInverse) }}{{/* M2O || (O2O with inverse edge) */}} + t1 := s.Table() + t2 := sql.Select({{ $e.Type.ID.Constant }}).From(sql.Table( + {{- if ne $.Table $e.Type.Table -}} + {{ $e.InverseTableConstant }} + {{- else -}} + {{ $e.TableConstant }} + {{- end -}} + )) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C({{ $e.ColumnConstant }}), t2)) + {{- else }}{{/* O2M || (O2O with assoc edge) */}} + t1 := s.Table() + t2 := sql.Select({{ $e.ColumnConstant }}).From(sql.Table({{ $e.TableConstant }})) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C({{ $.ID.Constant }}), t2)) + {{- end }} + }, + Gremlin: func(t *dsl.Traversal) { + {{- if $e.SelfRef }}{{/* selfref means it should be true in one of the directions */}} + in, out := __.InV(), __.OutV() + for _, p := range preds { + p.Gremlin(in) + p.Gremlin(out) + } + t.Where( + __.Or( + __.OutE({{ $label }}).Where(in), + __.InE({{ $label }}).Where(out), + ), + ) + {{- else }} + tr := __.{{ $inverse_direction }}V() + for _, p := range preds { + p.Gremlin(tr) + } + t.{{ $direction }}E({{ $label }}).Where(tr).{{ $direction }}V() + {{- end }} + }, + } + } +{{ end }} + +{{ end }} diff --git a/entc/gen/type.go b/entc/gen/type.go new file mode 100644 index 
000000000..1fa0d1b65 --- /dev/null +++ b/entc/gen/type.go @@ -0,0 +1,433 @@ +package gen + +import ( + "fmt" + "go/token" + "io" + "reflect" + "strconv" + "strings" + + "fbc/ent" + "fbc/ent/dialect/sql/schema" + "fbc/ent/field" + + "github.com/olekukonko/tablewriter" +) + +type ( + // Type represents one node/type in the graph, its relations and the information it holds. + Type struct { + Config + // Name holds the type/ent name. + Name string + // ID holds the ID field of this type. + ID *Field + // Fields holds all the primitive fields of this type. + Fields []*Field + // Edge holds all the edges of this type. + Edges []*Edge + } + + // Field holds the information of a type field used for the templates. + Field struct { + // Name is the name of this field in the database schema. + Name string + // Type holds the type information of the field. + Type field.Type + // Unique indicate if this field is a unique field. + Unique bool + // Optional indicates is this field is optional on create. + Optional bool + // Nullable indicates that this field can be null. + Nullable bool + // Default holds the default value of this field on creation. + Default interface{} + // StructTag of the field. default to "json". + StructTag string + // Validators holds the number of validators this field have. + Validators int + } + + // Edge of a graph between two types. + Edge struct { + // Name holds the name of the edge. + Name string + // Type holds a reference to the type this edge is directed to. + Type *Type + // Optional indicates is this edge is optional on create. + Optional bool + // Unique indicates if this edge is a unique edge. + Unique bool + // Inverse holds the name of the inverse edge. + Inverse string + // Owner holds the type of the edge-owner. For assoc-edges it's the + // type that holds the edge, for inverse-edges, it's the assoc type. + Owner *Type + // StructTag of the edge-field in the struct. default to "json". 
+ StructTag string + // Relation holds the relation info of an edge. + Rel Relation + // SelfRef indicates if this edge is a self-reference to the same + // type with the same name. For example, a User type has one of + // the following edges: + // + // edge.To("friends", User.Type) // many 2 many. + // edge.To("spouse", User.Type).Unique() // one 2 one. + // + SelfRef bool + } + + // Relation holds the relational database information for edges. + Relation struct { + // Type holds the relation type of the edge. + Type Rel + // Table holds the relation table for this edge. + // For O2O and O2M, it's the table name of the type this edge points to. + // For M2O, this is the owner's type, and for M2M this is the join table. + Table string + // Columns holds the relation column in the relation table above. + // In O2M, M2O and O2O, this is the first element. + Columns []string + } +) + +// NewType creates a new type and its fields from the given schema. +func NewType(c Config, schema ent.Schema) (*Type, error) { + typ := &Type{ + Config: c, + Name: reflect.TypeOf(schema).Name(), + ID: &Field{ + Name: "id", + Type: field.TypeString, + StructTag: `json:"id,omitempty"`, + }, + } + for _, f := range schema.Fields() { + if !f.Type().Valid() { + return nil, fmt.Errorf("invalid type for field %s", f.Name()) + } + typ.Fields = append(typ.Fields, &Field{ + Name: f.Name(), + Type: f.Type(), + Unique: f.IsUnique(), + Default: f.Value(), + Nullable: f.IsNullable(), + Optional: f.IsOptional(), + StructTag: structTag(f.Name(), f.Tag()), + Validators: len(f.Validators()), + }) + } + return typ, nil +} + +// Label returns Gremlin label name of the node/type. +func (t Type) Label() string { return snake(t.Name) } + +// Table returns SQL table name of the node/type. +func (t Type) Table() string { return snake(rules.Pluralize(t.Name)) } + +// Package returns the package name of this node. 
+func (t Type) Package() string { return strings.ToLower(t.Name) } + +// Receiver returns the receiver name of this node. It makes sure the +// receiver names doesn't conflict with import names. +func (t Type) Receiver() string { + parts := strings.Split(snake(t.Name), "_") + min := len(parts[0]) + for _, w := range parts[1:] { + if len(w) < min { + min = len(w) + } + } + for i := 1; i < min; i++ { + r := parts[0][:i] + for _, w := range parts[1:] { + r += w[:i] + } + if _, ok := t.Config.imports[r]; !ok { + return r + } + } + return strings.ToLower(t.Name) +} + +// HasAssoc returns true if this type has an assoc edge with the given name. +func (t Type) HasAssoc(name string) (*Edge, bool) { + for _, e := range t.Edges { + if name == e.Name { + return e, true + } + } + return nil, false +} + +// HasValidators indicates if any of this field has validators. +func (t Type) HasValidators() bool { + for _, f := range t.Fields { + if f.Validators > 0 { + return true + } + } + return false +} + +// NumConstraint returns the type's constraint count. Used for slice allocation. +func (t Type) NumConstraint() int { + var n int + for _, f := range t.Fields { + if f.Unique { + n++ + } + } + for _, e := range t.Edges { + if e.HasConstraint() { + n++ + } + } + return n +} + +// NumM2M returns the type's many-to-many edge count +func (t Type) NumM2M() int { + var n int + for _, e := range t.Edges { + if e.M2M() { + n++ + } + } + return n +} + +// Describe returns description of a type. The format of the description is: +// +// Type: +// +// +// +// +func (t Type) Describe(w io.Writer) { + b := &strings.Builder{} + b.WriteString(t.Name + ":\n") + table := tablewriter.NewWriter(b) + table.SetAutoFormatHeaders(false) + table.SetHeader([]string{"Field", "Type", "Unique", "Optional", "Nullable", "Default", "StructTag", "Validators"}) + for _, f := range append([]*Field{t.ID}, t.Fields...) 
{ + v := reflect.ValueOf(*f) + row := make([]string, v.NumField()) + for i := range row { + row[i] = fmt.Sprint(v.Field(i).Interface()) + } + table.Append(row) + } + table.Render() + table = tablewriter.NewWriter(b) + table.SetAutoFormatHeaders(false) + table.SetHeader([]string{"Edge", "Type", "Inverse", "BackRef", "Relation", "Unique", "Optional"}) + for _, e := range t.Edges { + table.Append([]string{ + e.Name, + e.Type.Name, + strconv.FormatBool(e.IsInverse()), + e.Inverse, + e.Rel.Type.String(), + strconv.FormatBool(e.Unique), + strconv.FormatBool(e.Optional), + }) + } + if table.NumLines() > 0 { + table.Render() + } + io.WriteString(w, strings.ReplaceAll(b.String(), "\n", "\n\t")+"\n") +} + +// HasDefault returns if this field has a default value. +func (f Field) HasDefault() bool { return f.Default != nil } + +// Constant returns the constant name of the field. +func (f Field) Constant() string { return "Field" + pascal(f.Name) } + +// DefaultConstant returns the constant name of the default value of this field. +func (f Field) DefaultConstant() string { return "Default" + pascal(f.Name) } + +// StructField returns the struct member of the field. +func (f Field) StructField() string { + if token.Lookup(f.Name).IsKeyword() { + return "_" + f.Name + } + return f.Name +} + +// Validator returns the validator name. +func (f Field) Validator() string { return pascal(f.Name) + "Validator" } + +// IsTime returns true if the field is timestamp field. +func (f Field) IsTime() bool { return f.Type == field.TypeTime } + +// IsString returns true if the field is a string field. +func (f Field) IsString() bool { return f.Type == field.TypeString } + +// NullType returns the sql null-type for optional and nullable fields. 
+func (f Field) NullType() string { + switch f.Type { + case field.TypeString: + return "sql.NullString" + case field.TypeBool: + return "sql.NullBool" + case field.TypeTime: + return "sql.NullTime" + case field.TypeInt, field.TypeInt64: + return "sql.NullInt64" + case field.TypeFloat64: + return "sql.NullFloat64" + } + return "interface{}" +} + +// NullTypeField extracts the nullable type field (if exists) from the given receiver. +// It also does the type conversion if needed. +func (f Field) NullTypeField(rec string) string { + switch f.Type { + case field.TypeString, field.TypeBool, field.TypeInt64, field.TypeFloat64: + return fmt.Sprintf("%s.%s", rec, strings.Title(f.Type.String())) + case field.TypeInt: + return fmt.Sprintf("int(%s.Int64)", rec) + case field.TypeTime: + return fmt.Sprintf("%s.Time", rec) + } + return rec +} + +// Column returns the table column. It sets it as a primary key (auto_increment) in case of ID field. +func (f Field) Column() *schema.Column { + c := &schema.Column{Name: f.Name, Type: f.Type, Unique: f.Unique} + if f.Name == "id" { + c.Type = field.TypeInt + c.Increment = true + } + return c +} + +// ExampleCode returns an example code of the field value for the example_test file. +func (f Field) ExampleCode() string { + switch f.Type { + case field.TypeString: + return "\"string\"" + case field.TypeBool: + return "true" + case field.TypeTime: + return "time.Now()" + default: + return "1" + } +} + +// Label returns the Gremlin label name of the edge. +// If the edge is inverse +func (e Edge) Label() string { + if e.IsInverse() { + return fmt.Sprintf("%s_%s", e.Owner.Label(), snake(e.Inverse)) + } + return fmt.Sprintf("%s_%s", e.Owner.Label(), snake(e.Name)) +} + +// M2M indicates if this edge is M2M edge. +func (e Edge) M2M() bool { return e.Rel.Type == M2M } + +// M2O indicates if this edge is M2O edge. +func (e Edge) M2O() bool { return e.Rel.Type == M2O } + +// O2M indicates if this edge is O2M edge. 
+func (e Edge) O2M() bool { return e.Rel.Type == O2M } + +// O2O indicates if this edge is O2O edge. +func (e Edge) O2O() bool { return e.Rel.Type == O2O } + +// IsInverse returns if this edge is an inverse edge. +func (e Edge) IsInverse() bool { return e.Inverse != "" } + +// Constant returns the constant name of the edge. +// If the edge is inverse, it returns the constant name of the owner-edge (assoc-edge). +func (e Edge) Constant() string { + name := e.Name + if e.IsInverse() { + name = e.Inverse + } + return pascal(name) + "Label" +} + +// InverseConstant returns the inverse constant name of the edge. +func (e Edge) InverseConstant() string { return pascal(e.Name) + "InverseLabel" } + +// TableConstant returns the constant name of the relation table. +func (e Edge) TableConstant() string { return pascal(e.Name) + "Table" } + +// InverseTableConstant returns the constant name of the other/inverse type of the relation. +func (e Edge) InverseTableConstant() string { return pascal(e.Name) + "InverseTable" } + +// ColumnConstant returns the constant name of the relation column. +func (e Edge) ColumnConstant() string { return pascal(e.Name) + "Column" } + +// PKConstant returns the constant name of the primary key. Used for M2M edges. +func (e Edge) PKConstant() string { return pascal(e.Name) + "PrimaryKey" } + +// HasConstraint indicates if this edge has a unique constraint check. +// We check uniqueness when both-directions are unique or one of them. +func (e Edge) HasConstraint() bool { + return e.Rel.Type == O2O || e.Rel.Type == O2M +} + +// StructField returns the struct member of the edge. +func (e Edge) StructField() string { + if token.Lookup(e.Name).IsKeyword() { + return "_" + e.Name + } + return e.Name +} + +// Column returns the first element from the columns slice. 
+func (r Relation) Column() string { + if len(r.Columns) == 0 { + panic(fmt.Sprintf("missing column for Relation.Table: %s", r.Table)) + } + return r.Columns[0] +} + +// Rel is a relation type of an edge. +type Rel int + +// Relation types. +const ( + Unk Rel = iota // Unknown. + O2O // One to one / has one. + O2M // One to many / has many. + M2O // Many to one (inverse perspective for O2M). + M2M // Many to many. +) + +// String returns the relation name. +func (r Rel) String() string { + s := "Unknown" + switch r { + case O2O: + s = "O2O" + case O2M: + s = "O2M" + case M2O: + s = "M2O" + case M2M: + s = "M2M" + } + return s +} + +func structTag(name, tag string) string { + t := fmt.Sprintf(`json:"%s,omitempty"`, name) + if tag == "" { + return t + } + if _, ok := reflect.StructTag(tag).Lookup("json"); !ok { + tag = t + " " + tag + } + return tag +} diff --git a/entc/gen/type_test.go b/entc/gen/type_test.go new file mode 100644 index 000000000..8813a10a8 --- /dev/null +++ b/entc/gen/type_test.go @@ -0,0 +1,240 @@ +package gen + +import ( + "strings" + "testing" + + "fbc/ent/field" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestType(t *testing.T) { + require := require.New(t) + typ, err := NewType(Config{Package: "entc/gen"}, T1{}) + require.NoError(err) + require.NotNil(typ) + require.Equal("T1", typ.Name) + require.Equal("t1", typ.Label()) + require.Equal("t1", typ.Package()) + require.Equal("t", typ.Receiver()) +} + +func TestType_Label(t *testing.T) { + tests := []struct { + name string + label string + }{ + {"User", "user"}, + {"UserInfo", "user_info"}, + {"PHBOrg", "phb_org"}, + {"UserID", "user_id"}, + {"HTTPCode", "http_code"}, + } + for _, tt := range tests { + typ := &Type{Name: tt.name} + require.Equal(t, tt.label, typ.Label()) + } +} + +func TestType_Table(t *testing.T) { + tests := []struct { + name string + label string + }{ + {"User", "users"}, + {"Device", "devices"}, + {"UserInfo", "user_infos"}, + 
{"PHBOrg", "phb_orgs"}, + {"HTTPCode", "http_codes"}, + } + for _, tt := range tests { + typ := &Type{Name: tt.name} + require.Equal(t, tt.label, typ.Table()) + } +} + +func TestType_Receiver(t *testing.T) { + tests := []struct { + name string + receiver string + }{ + {"User", "u"}, + {"Group", "gr"}, + {"UserData", "ud"}, + {"UserInfo", "ui"}, + {"User_Info", "ui"}, + {"PHBUser", "pu"}, + {"PHBOrg", "po"}, + {"DomainSpecificLang", "dospla"}, + } + for _, tt := range tests { + typ := &Type{Name: tt.name, Config: Config{Package: "entc/gen", imports: imports()}} + require.Equal(t, tt.receiver, typ.Receiver()) + } +} + +func TestType_Package(t *testing.T) { + tests := []struct { + name string + pkg string + }{ + {"User", "user"}, + {"UserInfo", "userinfo"}, + {"PHBOrg", "phborg"}, + {"UserID", "userid"}, + {"HTTPCode", "httpcode"}, + } + for _, tt := range tests { + typ := &Type{Name: tt.name} + require.Equal(t, tt.pkg, typ.Package()) + } +} + +func TestField(t *testing.T) { + f := &Field{Type: field.TypeTime} + require.True(t, f.IsTime()) + require.Equal(t, "time.Now()", f.ExampleCode()) + + require.Equal(t, "1", Field{Type: field.TypeInt}.ExampleCode()) + require.Equal(t, "true", Field{Type: field.TypeBool}.ExampleCode()) + require.Equal(t, "1", Field{Type: field.TypeFloat64}.ExampleCode()) + require.Equal(t, "\"string\"", Field{Type: field.TypeString}.ExampleCode()) +} + +func TestField_Constant(t *testing.T) { + tests := []struct { + name string + constant string + }{ + {"user", "FieldUser"}, + {"user_id", "FieldUserID"}, + {"user_name", "FieldUserName"}, + } + for _, tt := range tests { + typ := &Field{Name: tt.name} + require.Equal(t, tt.constant, typ.Constant()) + } +} + +func TestField_DefaultConstant(t *testing.T) { + tests := []struct { + name string + constant string + }{ + {"active", "DefaultActive"}, + {"expired_at", "DefaultExpiredAt"}, + {"group_name", "DefaultGroupName"}, + } + for _, tt := range tests { + typ := &Field{Name: tt.name} + 
require.Equal(t, tt.constant, typ.DefaultConstant()) + } +} + +func TestEdge(t *testing.T) { + u, g := &Type{Name: "User"}, &Type{Name: "Group"} + groups := &Edge{Name: "groups", Type: g, Owner: u, Rel: Relation{Type: M2M}} + users := &Edge{Name: "users", Inverse: "groups", Type: u, Owner: u, Rel: Relation{Type: M2M}} + + require.True(t, users.IsInverse()) + require.False(t, groups.IsInverse()) + + require.Equal(t, "GroupsLabel", users.Constant()) + require.Equal(t, "GroupsLabel", groups.Constant()) + + require.Equal(t, "UsersInverseLabel", users.InverseConstant()) + require.Equal(t, "user_groups", users.Label()) + require.Equal(t, "user_groups", groups.Label()) +} + +func TestType_Describe(t *testing.T) { + tests := []struct { + typ *Type + out string + }{ + { + typ: &Type{ + Name: "User", + ID: &Field{Name: "id", Type: field.TypeInt}, + Fields: []*Field{ + {Name: "name", Type: field.TypeString, Validators: 1}, + {Name: "age", Type: field.TypeInt, Nullable: true}, + }, + }, + out: ` +User: + +-------+--------+--------+----------+----------+---------+-----------+------------+ + | Field | Type | Unique | Optional | Nullable | Default | StructTag | Validators | + +-------+--------+--------+----------+----------+---------+-----------+------------+ + | id | int | false | false | false | | | 0 | + | name | string | false | false | false | | | 1 | + | age | int | false | false | true | | | 0 | + +-------+--------+--------+----------+----------+---------+-----------+------------+ + +`, + }, + { + typ: &Type{ + Name: "User", + ID: &Field{Name: "id", Type: field.TypeInt}, + Edges: []*Edge{ + {Name: "groups", Type: &Type{Name: "Group"}, Rel: Relation{Type: M2M}, Optional: true}, + {Name: "spouse", Type: &Type{Name: "User"}, Unique: true, Rel: Relation{Type: O2O}}, + }, + }, + out: ` +User: + +-------+------+--------+----------+----------+---------+-----------+------------+ + | Field | Type | Unique | Optional | Nullable | Default | StructTag | Validators | + 
+-------+------+--------+----------+----------+---------+-----------+------------+ + | id | int | false | false | false | | | 0 | + +-------+------+--------+----------+----------+---------+-----------+------------+ + +--------+-------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +--------+-------+---------+---------+----------+--------+----------+ + | groups | Group | false | | M2M | false | true | + | spouse | User | false | | O2O | true | false | + +--------+-------+---------+---------+----------+--------+----------+ + +`, + }, + { + typ: &Type{ + Name: "User", + ID: &Field{Name: "id", Type: field.TypeInt}, + Fields: []*Field{ + {Name: "name", Type: field.TypeString, Validators: 1}, + {Name: "age", Type: field.TypeInt, Nullable: true}, + }, + Edges: []*Edge{ + {Name: "groups", Type: &Type{Name: "Group"}, Rel: Relation{Type: M2M}, Optional: true}, + {Name: "spouse", Type: &Type{Name: "User"}, Unique: true, Rel: Relation{Type: O2O}}, + }, + }, + out: ` +User: + +-------+--------+--------+----------+----------+---------+-----------+------------+ + | Field | Type | Unique | Optional | Nullable | Default | StructTag | Validators | + +-------+--------+--------+----------+----------+---------+-----------+------------+ + | id | int | false | false | false | | | 0 | + | name | string | false | false | false | | | 1 | + | age | int | false | false | true | | | 0 | + +-------+--------+--------+----------+----------+---------+-----------+------------+ + +--------+-------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +--------+-------+---------+---------+----------+--------+----------+ + | groups | Group | false | | M2M | false | true | + | spouse | User | false | | O2O | true | false | + +--------+-------+---------+---------+----------+--------+----------+ + +`, + }, + } + for _, tt := range tests { + b := 
&strings.Builder{} + tt.typ.Describe(b) + assert.Equal(t, tt.out, "\n"+b.String()) + } +} diff --git a/entc/gen/where.go b/entc/gen/where.go new file mode 100644 index 000000000..4f2101ff6 --- /dev/null +++ b/entc/gen/where.go @@ -0,0 +1,69 @@ +package gen + +// Op is a predicate for the where clause. +type Op int + +// List of all builtin predicates. +const ( + EQ Op = iota // = + NEQ // <> + GT // > + GTE // >= + LT // < + LTE // <= + In // within + NotIn // without + Contains // containing + HasPrefix // startingWith + HasSuffix // endingWith +) + +// Name returns the string representation of an operator. +func (o Op) Name() string { + if int(o) < len(opText) { + return opText[o] + } + return "Unknown" +} + +// Gremlin returns the gremlin code representation of an operator. +func (o Op) Gremlin() string { + if code := gremlinCode[o]; code != "" { + return code + } + return o.Name() +} + +// Variadic reports if the predicate is a variadic function. +func (o Op) Variadic() bool { + return o == In || o == NotIn +} + +var ( + // operations text. + opText = [...]string{ + EQ: "EQ", + NEQ: "NEQ", + GT: "GT", + GTE: "GTE", + LT: "LT", + LTE: "LTE", + Contains: "Contains", + HasPrefix: "HasPrefix", + HasSuffix: "HasSuffix", + In: "In", + NotIn: "NotIn", + } + // operations code in gremlin. + gremlinCode = [...]string{ + In: "Within", + NotIn: "Without", + Contains: "Containing", + HasPrefix: "StartingWith", + HasSuffix: "EndingWith", + } + // operations per type. 
+ boolOps = []Op{EQ, NEQ} + numericOps = append(boolOps[:], GT, GTE, LT, LTE, In, NotIn) + stringOps = append(numericOps[:], Contains, HasPrefix, HasSuffix) +) diff --git a/entc/integration/README.md b/entc/integration/README.md new file mode 100644 index 000000000..bebf36bb9 --- /dev/null +++ b/entc/integration/README.md @@ -0,0 +1,36 @@ +### entc integration tests + +#### Regenerating new templates + +If you edited one of the files in `entc/gen/template` or `entc/build/template`, +please run the following command: + +For `entc/gen` +``` +cd ~/fbsource/fbcode/fbc/ent/entc/gen && go generate && cd - +``` + +For `entc/build` + +``` +cd ~/fbsource/fbcode/fbc/ent/entc/build && go generate && cd - +``` + +Then, regenerate new assets for your schema: +``` +go run ~/fbsource/fbcode/fbc/ent/entc/cmd/entc/entc.go generate ./ent/schema +``` + +#### Running the integration tests + +``` +docker-compose -f compose/docker-compose.yaml up -d +go test +``` + +Use the `-run` flag for running specific test or set of tests. For example: +``` +go test -run=MySQL + +go test -run=SQLite/Sanity +``` diff --git a/entc/integration/compose/docker-compose.yaml b/entc/integration/compose/docker-compose.yaml new file mode 100644 index 000000000..b53712b44 --- /dev/null +++ b/entc/integration/compose/docker-compose.yaml @@ -0,0 +1,19 @@ +version: "3.7" + +services: + + mysql: + image: mysql + environment: + MYSQL_DATABASE: test + MYSQL_ROOT_PASSWORD: pass + healthcheck: + test: mysql -ppass -e "show databases" + ports: + - 3306:3306 + + neptune: + build: gremlin-server + restart: on-failure + ports: + - 8182:8182 \ No newline at end of file diff --git a/entc/integration/compose/gremlin-server/Dockerfile b/entc/integration/compose/gremlin-server/Dockerfile new file mode 100644 index 000000000..99ffc45b2 --- /dev/null +++ b/entc/integration/compose/gremlin-server/Dockerfile @@ -0,0 +1,5 @@ +# Fetch base gremlin server image +FROM tinkerpop/gremlin-server + +# Copy overridden server configuration. 
+COPY gremlin-server.yaml tinkergraph-empty.properties /opt/gremlin-server/conf/ diff --git a/entc/integration/compose/gremlin-server/gremlin-server.yaml b/entc/integration/compose/gremlin-server/gremlin-server.yaml new file mode 100644 index 000000000..d973ca95f --- /dev/null +++ b/entc/integration/compose/gremlin-server/gremlin-server.yaml @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +host: localhost +port: 8182 +scriptEvaluationTimeout: 30000 +channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer +graphs: { + graph: conf/tinkergraph-empty.properties} +scriptEngines: { + gremlin-groovy: { + plugins: { org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.tinkergraph.jsr223.TinkerGraphGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {classImports: [java.lang.Math], methodImports: [java.lang.Math#*]}, + org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {files: [scripts/empty-sample.groovy]}}}} +serializers: + - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0, config: { ioRegistries: [org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0] }} +metrics: { + slf4jReporter: {enabled: true, interval: 180000}} +strictTransactionManagement: false +maxInitialLineLength: 4096 +maxHeaderSize: 8192 +maxChunkSize: 8192 +maxContentLength: 65536 +maxAccumulationBufferComponents: 1024 +resultIterationBatchSize: 64 \ No newline at end of file diff --git a/entc/integration/compose/gremlin-server/tinkergraph-empty.properties b/entc/integration/compose/gremlin-server/tinkergraph-empty.properties new file mode 100644 index 000000000..591c70856 --- /dev/null +++ b/entc/integration/compose/gremlin-server/tinkergraph-empty.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +gremlin.graph=org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph +gremlin.tinkergraph.vertexIdManager=UUID +gremlin.tinkergraph.edgeIdManager=UUID \ No newline at end of file diff --git a/entc/integration/ent/card.go b/entc/integration/ent/card.go new file mode 100644 index 000000000..7d1bab92d --- /dev/null +++ b/entc/integration/ent/card.go @@ -0,0 +1,140 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Card is the model entity for the Card schema. +type Card struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Number holds the value of the "number" field. + Number string `json:"number,omitempty"` +} + +// FromResponse scans the gremlin response data into Card. +func (c *Card) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vc struct { + ID string `json:"id,omitempty"` + Number string `json:"number,omitempty"` + } + if err := vmap.Decode(&vc); err != nil { + return err + } + c.ID = vc.ID + c.Number = vc.Number + return nil +} + +// FromRows scans the sql response data into Card. +func (c *Card) FromRows(rows *sql.Rows) error { + var vc struct { + ID int + Number string + } + // the order here should be the same as in the `card.Columns`. 
+ if err := rows.Scan( + &vc.ID, + &vc.Number, + ); err != nil { + return err + } + c.ID = strconv.Itoa(vc.ID) + c.Number = vc.Number + return nil +} + +// QueryOwner queries the owner edge of the Card. +func (c *Card) QueryOwner() *UserQuery { + return (&CardClient{c.config}).QueryOwner(c) +} + +// Update returns a builder for updating this Card. +// Note that, you need to call Card.Unwrap() before calling this method, if this Card +// was returned from a transaction, and the transaction was committed or rolled back. +func (c *Card) Update() *CardUpdateOne { + return (&CardClient{c.config}).UpdateOne(c) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (c *Card) Unwrap() *Card { + tx, ok := c.config.driver.(*txDriver) + if !ok { + panic("ent: Card is not a transactional entity") + } + c.config.driver = tx.drv + return c +} + +// String implements the fmt.Stringer. +func (c *Card) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Card(") + buf.WriteString(fmt.Sprintf("id=%v,", c.ID)) + buf.WriteString(fmt.Sprintf("number=%v", c.Number)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (c *Card) id() int { + id, _ := strconv.Atoi(c.ID) + return id +} + +// Cards is a parsable slice of Card. +type Cards []*Card + +// FromResponse scans the gremlin response data into Cards. +func (c *Cards) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vc []struct { + ID string `json:"id,omitempty"` + Number string `json:"number,omitempty"` + } + if err := vmap.Decode(&vc); err != nil { + return err + } + for _, v := range vc { + *c = append(*c, &Card{ + ID: v.ID, + Number: v.Number, + }) + } + return nil +} + +// FromRows scans the sql response data into Cards. 
+func (c *Cards) FromRows(rows *sql.Rows) error { + for rows.Next() { + vc := &Card{} + if err := vc.FromRows(rows); err != nil { + return err + } + *c = append(*c, vc) + } + return nil +} + +func (c Cards) config(cfg config) { + for i := range c { + c[i].config = cfg + } +} diff --git a/entc/integration/ent/card/card.go b/entc/integration/ent/card/card.go new file mode 100644 index 000000000..896efbe39 --- /dev/null +++ b/entc/integration/ent/card/card.go @@ -0,0 +1,39 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package card + +import ( + "fbc/ent/entc/integration/ent/schema" +) + +const ( + // Label holds the string label denoting the card type in the database. + Label = "card" + // OwnerInverseLabel holds the string label denoting the owner inverse edge type in the database. + OwnerInverseLabel = "user_card" + // FieldNumber holds the string denoting the number vertex property in the database. + FieldNumber = "number" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the card in the database. + Table = "cards" + // OwnerTable is the table the holds the owner relation/edge. + OwnerTable = "cards" + // OwnerInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + OwnerInverseTable = "users" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "owner_id" +) + +// Columns holds all SQL columns are card fields. +var Columns = []string{ + FieldID, + FieldNumber, +} + +var ( + fields = schema.Card{}.Fields() + // NumberValidator is a validator for the "number" field. It is called by the builders before save. 
+ NumberValidator = fields[0].Validators()[0].(func(string) error) +) diff --git a/entc/integration/ent/card/where.go b/entc/integration/ent/card/where.go new file mode 100644 index 000000000..2607e7b2c --- /dev/null +++ b/entc/integration/ent/card/where.go @@ -0,0 +1,355 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package card + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Number applies equality check predicate on the "number" field. It's identical to NumberEQ. 
+func Number(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.EQ(v)) + }, + } +} + +// NumberEQ applies the EQ predicate on the "number" field. +func NumberEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.EQ(v)) + }, + } +} + +// NumberNEQ applies the NEQ predicate on the "number" field. +func NumberNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.NEQ(v)) + }, + } +} + +// NumberGT applies the GT predicate on the "number" field. +func NumberGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.GT(v)) + }, + } +} + +// NumberGTE applies the GTE predicate on the "number" field. +func NumberGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.GTE(v)) + }, + } +} + +// NumberLT applies the LT predicate on the "number" field. +func NumberLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.LT(v)) + }, + } +} + +// NumberLTE applies the LTE predicate on the "number" field. 
+func NumberLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.LTE(v)) + }, + } +} + +// NumberIn applies the In predicate on the "number" field. +func NumberIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldNumber), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.Within(v...)) + }, + } +} + +// NumberNotIn applies the NotIn predicate on the "number" field. +func NumberNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldNumber), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.Without(v...)) + }, + } +} + +// NumberContains applies the Contains predicate on the "number" field. +func NumberContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.Containing(v)) + }, + } +} + +// NumberHasPrefix applies the HasPrefix predicate on the "number" field. 
+func NumberHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.StartingWith(v)) + }, + } +} + +// NumberHasSuffix applies the HasSuffix predicate on the "number" field. +func NumberHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldNumber), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNumber, p.EndingWith(v)) + }, + } +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where(sql.NotNull(t1.C(OwnerColumn))) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(OwnerInverseLabel).InV() + }, + } +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FieldID).From(sql.Table(OwnerInverseTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(OwnerColumn), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(OwnerInverseLabel).Where(tr).InV() + }, + } +} diff --git a/entc/integration/ent/card_create.go b/entc/integration/ent/card_create.go new file mode 100644 index 000000000..c1d6cfe5f --- /dev/null +++ b/entc/integration/ent/card_create.go @@ -0,0 +1,179 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// CardCreate is the builder for creating a Card entity. +type CardCreate struct { + config + number *string + owner map[string]struct{} +} + +// SetNumber sets the number field. +func (cc *CardCreate) SetNumber(s string) *CardCreate { + cc.number = &s + return cc +} + +// SetOwnerID sets the owner edge to User by id. +func (cc *CardCreate) SetOwnerID(id string) *CardCreate { + if cc.owner == nil { + cc.owner = make(map[string]struct{}) + } + cc.owner[id] = struct{}{} + return cc +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (cc *CardCreate) SetNillableOwnerID(id *string) *CardCreate { + if id != nil { + cc = cc.SetOwnerID(*id) + } + return cc +} + +// SetOwner sets the owner edge to User. +func (cc *CardCreate) SetOwner(u *User) *CardCreate { + return cc.SetOwnerID(u.ID) +} + +// Save creates the Card in the database. +func (cc *CardCreate) Save(ctx context.Context) (*Card, error) { + if cc.number == nil { + return nil, errors.New("ent: missing required field \"number\"") + } + if err := card.NumberValidator(*cc.number); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"number\": %v", err) + } + if len(cc.owner) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"owner\"") + } + switch cc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cc.sqlSave(ctx) + case dialect.Neptune: + return cc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. 
+func (cc *CardCreate) SaveX(ctx context.Context) *Card { + v, err := cc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (cc *CardCreate) sqlSave(ctx context.Context) (*Card, error) { + var ( + res sql.Result + c = &Card{config: cc.config} + ) + tx, err := cc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(card.Table).Default(cc.driver.Dialect()) + if cc.number != nil { + builder.Set(card.FieldNumber, *cc.number) + c.Number = *cc.number + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + c.ID = strconv.FormatInt(id, 10) + if len(cc.owner) > 0 { + eid, err := strconv.Atoi(keys(cc.owner)[0]) + if err != nil { + return nil, err + } + query, args := sql.Update(card.OwnerTable). + Set(card.OwnerColumn, eid). + Where(sql.EQ(card.FieldID, id).And().IsNull(card.OwnerColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(cc.owner) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"owner\" %v already connected to a different \"Card\"", keys(cc.owner))}) + } + } + if err := tx.Commit(); err != nil { + return nil, err + } + return c, nil +} + +func (cc *CardCreate) gremlinSave(ctx context.Context) (*Card, error) { + res := &gremlin.Response{} + query, bindings := cc.gremlin().Query() + if err := cc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + c := &Card{config: cc.config} + if err := c.FromResponse(res); err != nil { + return nil, err + } + return c, nil +} + +func (cc *CardCreate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint 
predicate. + test *dsl.Traversal // test matches and its constant. + } + constraints := make([]*constraint, 0, 1) + v := g.AddV(card.Label) + if cc.number != nil { + v.Property(dsl.Single, card.FieldNumber, *cc.number) + } + for id := range cc.owner { + v.AddE(user.CardLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.CardLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(card.Label, user.CardLabel, id)), + }) + } + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr +} diff --git a/entc/integration/ent/card_delete.go b/entc/integration/ent/card_delete.go new file mode 100644 index 000000000..ccc7fdef1 --- /dev/null +++ b/entc/integration/ent/card_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/card" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// CardDelete is the builder for deleting a Card entity. +type CardDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (cd *CardDelete) Where(ps ...ent.Predicate) *CardDelete { + cd.predicates = append(cd.predicates, ps...) + return cd +} + +// Exec executes the deletion query. +func (cd *CardDelete) Exec(ctx context.Context) error { + switch cd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cd.sqlExec(ctx) + case dialect.Neptune: + return cd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cd *CardDelete) ExecX(ctx context.Context) { + if err := cd.Exec(ctx); err != nil { + panic(err) + } +} + +func (cd *CardDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(card.Table)) + for _, p := range cd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(card.Table).FromSelect(selector).Query() + return cd.driver.Exec(ctx, query, args, &res) +} + +func (cd *CardDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := cd.gremlin().Query() + return cd.driver.Exec(ctx, query, bindings, res) +} + +func (cd *CardDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(card.Label) + for _, p := range cd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// CardDeleteOne is the builder for deleting a single Card entity. +type CardDeleteOne struct { + cd *CardDelete +} + +// Exec executes the deletion query. +func (cdo *CardDeleteOne) Exec(ctx context.Context) error { + return cdo.cd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (cdo *CardDeleteOne) ExecX(ctx context.Context) { + cdo.cd.ExecX(ctx) +} diff --git a/entc/integration/ent/card_query.go b/entc/integration/ent/card_query.go new file mode 100644 index 000000000..67c3a63e5 --- /dev/null +++ b/entc/integration/ent/card_query.go @@ -0,0 +1,618 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// CardQuery is the builder for querying Card entities. +type CardQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. 
+ sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (cq *CardQuery) Where(ps ...ent.Predicate) *CardQuery { + cq.predicates = append(cq.predicates, ps...) + return cq +} + +// Limit adds a limit step to the query. +func (cq *CardQuery) Limit(limit int) *CardQuery { + cq.limit = &limit + return cq +} + +// Order adds an order step to the query. +func (cq *CardQuery) Order(o ...Order) *CardQuery { + cq.order = append(cq.order, o...) + return cq +} + +// QueryOwner chains the current query on the owner edge. +func (cq *CardQuery) QueryOwner() *UserQuery { + query := &UserQuery{config: cq.config} + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := cq.sqlQuery() + t2.Select(t2.C(card.OwnerColumn)) + query.sql = sql.Select(t1.Columns(user.Columns...)...). + From(t1). + Join(t2). + On(t1.C(user.FieldID), t2.C(card.OwnerColumn)) + case dialect.Neptune: + gremlin := cq.gremlinQuery() + query.gremlin = gremlin.InE(user.CardLabel).OutV() + } + return query +} + +// Get returns a Card entity by its id. +func (cq *CardQuery) Get(ctx context.Context, id string) (*Card, error) { + return cq.Where(card.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (cq *CardQuery) GetX(ctx context.Context, id string) *Card { + c, err := cq.Get(ctx, id) + if err != nil { + panic(err) + } + return c +} + +// First returns the first Card entity in the query. Returns *ErrNotFound when no card was found. +func (cq *CardQuery) First(ctx context.Context) (*Card, error) { + cs, err := cq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(cs) == 0 { + return nil, &ErrNotFound{card.Label} + } + return cs[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (cq *CardQuery) FirstX(ctx context.Context) *Card { + c, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return c +} + +// FirstID returns the first Card id in the query. Returns *ErrNotFound when no id was found. +func (cq *CardQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{card.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (cq *CardQuery) FirstXID(ctx context.Context) string { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Card entity in the query, returns an error if not exactly one entity was returned. +func (cq *CardQuery) Only(ctx context.Context) (*Card, error) { + cs, err := cq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(cs) { + case 1: + return cs[0], nil + case 0: + return nil, &ErrNotFound{card.Label} + default: + return nil, &ErrNotSingular{card.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *CardQuery) OnlyX(ctx context.Context) *Card { + c, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return c +} + +// OnlyID returns the only Card id in the query, returns an error if not exactly one id was returned. +func (cq *CardQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{card.Label} + default: + err = &ErrNotSingular{card.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (cq *CardQuery) OnlyXID(ctx context.Context) string { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Cards. 
+func (cq *CardQuery) All(ctx context.Context) ([]*Card, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlAll(ctx) + case dialect.Neptune: + return cq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (cq *CardQuery) AllX(ctx context.Context) []*Card { + cs, err := cq.All(ctx) + if err != nil { + panic(err) + } + return cs +} + +// IDs executes the query and returns a list of Card ids. +func (cq *CardQuery) IDs(ctx context.Context) ([]string, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlIDs(ctx) + case dialect.Neptune: + return cq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (cq *CardQuery) IDsX(ctx context.Context) []string { + ids, err := cq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cq *CardQuery) Count(ctx context.Context) (int, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlCount(ctx) + case dialect.Neptune: + return cq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (cq *CardQuery) CountX(ctx context.Context) int { + count, err := cq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cq *CardQuery) Exist(ctx context.Context) (bool, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlExist(ctx) + case dialect.Neptune: + return cq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (cq *CardQuery) ExistX(ctx context.Context) bool { + exist, err := cq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Number string `json:"number,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Card.Query(). +// GroupBy(card.FieldNumber). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (cq *CardQuery) GroupBy(field string, fields ...string) *CardGroupBy { + group := &CardGroupBy{config: cq.config} + group.fields = append([]string{field}, fields...) + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = cq.sqlQuery() + case dialect.Neptune: + group.gremlin = cq.gremlinQuery() + } + return group +} + +func (cq *CardQuery) sqlAll(ctx context.Context) ([]*Card, error) { + rows := &sql.Rows{} + selector := cq.sqlQuery() + if unique := cq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := cq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var cs Cards + if err := cs.FromRows(rows); err != nil { + return nil, err + } + cs.config(cq.config) + return cs, nil +} + +func (cq *CardQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := cq.sqlQuery() + unique := []string{card.FieldID} + if len(cq.unique) > 0 { + unique = cq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := cq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (cq *CardQuery) sqlExist(ctx 
context.Context) (bool, error) { + n, err := cq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (cq *CardQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := cq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (cq *CardQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(card.Table) + selector := sql.Select(t1.Columns(card.Columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(card.Columns...)...) + } + for _, p := range cq.predicates { + p.SQL(selector) + } + for _, p := range cq.order { + p.SQL(selector) + } + if limit := cq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (cq *CardQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (cq *CardQuery) gremlinAll(ctx context.Context) ([]*Card, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().ValueMap(true).Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var cs Cards + if err := cs.FromResponse(res); err != nil { + return nil, err + } + cs.config(cq.config) + return cs, nil +} + +func (cq *CardQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().Count().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (cq *CardQuery) 
gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().HasNext().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (cq *CardQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(card.Label) + if cq.gremlin != nil { + v = cq.gremlin.Clone() + } + for _, p := range cq.predicates { + p.Gremlin(v) + } + if len(cq.order) > 0 { + v.Order() + for _, p := range cq.order { + p.Gremlin(v) + } + } + if limit := cq.limit; limit != nil { + v.Limit(*limit) + } + if unique := cq.unique; len(unique) == 0 { + v.Dedup() + } + return v +} + +// CardQuery is the builder for group-by Card entities. +type CardGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *CardGroupBy) Aggregate(fns ...Aggregate) *CardGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (cgb *CardGroupBy) Scan(ctx context.Context, v interface{}) error { + switch cgb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cgb.sqlScan(ctx, v) + case dialect.Neptune: + return cgb.gremlinScan(ctx, v) + default: + return errors.New("cgb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (cgb *CardGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := cgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. 
+func (cgb *CardGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CardGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (cgb *CardGroupBy) StringsX(ctx context.Context) []string { + v, err := cgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (cgb *CardGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CardGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (cgb *CardGroupBy) IntsX(ctx context.Context) []int { + v, err := cgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (cgb *CardGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CardGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (cgb *CardGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := cgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. 
+func (cgb *CardGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CardGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (cgb *CardGroupBy) BoolsX(ctx context.Context) []bool { + v, err := cgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (cgb *CardGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := cgb.sqlQuery().Query() + if err := cgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (cgb *CardGroupBy) sqlQuery() *sql.Selector { + selector := cgb.sql + columns := make([]string, 0, len(cgb.fields)+len(cgb.fns)) + columns = append(columns, cgb.fields...) + for _, fn := range cgb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(cgb.fields...) +} + +func (cgb *CardGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := cgb.gremlinQuery().Query() + if err := cgb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(cgb.fields)+len(cgb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (cgb *CardGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range cgb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range cgb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return cgb.gremlin.Group(). + By(__.Values(cgb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). 
+ Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/card_update.go b/entc/integration/ent/card_update.go new file mode 100644 index 000000000..1dcc2fbb8 --- /dev/null +++ b/entc/integration/ent/card_update.go @@ -0,0 +1,474 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// CardUpdate is the builder for updating Card entities. +type CardUpdate struct { + config + number *string + owner map[string]struct{} + clearedOwner bool + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (cu *CardUpdate) Where(ps ...ent.Predicate) *CardUpdate { + cu.predicates = append(cu.predicates, ps...) + return cu +} + +// SetNumber sets the number field. +func (cu *CardUpdate) SetNumber(s string) *CardUpdate { + cu.number = &s + return cu +} + +// SetOwnerID sets the owner edge to User by id. +func (cu *CardUpdate) SetOwnerID(id string) *CardUpdate { + if cu.owner == nil { + cu.owner = make(map[string]struct{}) + } + cu.owner[id] = struct{}{} + return cu +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (cu *CardUpdate) SetNillableOwnerID(id *string) *CardUpdate { + if id != nil { + cu = cu.SetOwnerID(*id) + } + return cu +} + +// SetOwner sets the owner edge to User. +func (cu *CardUpdate) SetOwner(u *User) *CardUpdate { + return cu.SetOwnerID(u.ID) +} + +// ClearOwner clears the owner edge to User. +func (cu *CardUpdate) ClearOwner() *CardUpdate { + cu.clearedOwner = true + return cu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. 
+func (cu *CardUpdate) Save(ctx context.Context) (int, error) {
+ if cu.number != nil {
+ if err := card.NumberValidator(*cu.number); err != nil {
+ return 0, fmt.Errorf("ent: validator failed for field \"number\": %v", err)
+ }
+ }
+ if len(cu.owner) > 1 {
+ return 0, errors.New("ent: multiple assignments on a unique edge \"owner\"")
+ }
+ switch cu.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ return cu.sqlSave(ctx)
+ case dialect.Neptune:
+ vertices, err := cu.gremlinSave(ctx)
+ return len(vertices), err
+ default:
+ return 0, errors.New("ent: unsupported dialect")
+ }
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (cu *CardUpdate) SaveX(ctx context.Context) int {
+ affected, err := cu.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return affected
+}
+
+// Exec executes the query.
+func (cu *CardUpdate) Exec(ctx context.Context) error {
+ _, err := cu.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cu *CardUpdate) ExecX(ctx context.Context) {
+ if err := cu.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// sqlSave applies the update on the SQL dialects. It first selects the ids of
+// all rows matching the predicates, then mutates fields and the "owner" edge
+// in a single transaction, returning the number of matched rows.
+func (cu *CardUpdate) sqlSave(ctx context.Context) (n int, err error) {
+ selector := sql.Select(card.FieldID).From(sql.Table(card.Table))
+ for _, p := range cu.predicates {
+ p.SQL(selector)
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err = cu.driver.Query(ctx, query, args, rows); err != nil {
+ return 0, err
+ }
+ defer rows.Close()
+ var ids []int
+ for rows.Next() {
+ var id int
+ if err := rows.Scan(&id); err != nil {
+ return 0, fmt.Errorf("ent: failed reading id: %v", err)
+ }
+ ids = append(ids, id)
+ }
+ if len(ids) == 0 {
+ // No rows matched the predicates; nothing to update.
+ return 0, nil
+ }
+
+ tx, err := cu.driver.Tx(ctx)
+ if err != nil {
+ return 0, err
+ }
+ var (
+ update bool
+ res sql.Result
+ builder = sql.Update(card.Table).Where(sql.InInts(card.FieldID, ids...))
+ )
+ if cu.number != nil {
+ update = true
+ builder.Set(card.FieldNumber, *cu.number)
+ }
+ if update {
+ query, args := builder.Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return 0, rollback(tx, err)
+ }
+ }
+ if cu.clearedOwner {
+ query, args := sql.Update(card.OwnerTable).
+ SetNull(card.OwnerColumn).
+ Where(sql.InInts(user.FieldID, ids...)).
+ Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return 0, rollback(tx, err)
+ }
+ }
+ if len(cu.owner) > 0 {
+ for _, id := range ids {
+ eid, serr := strconv.Atoi(keys(cu.owner)[0])
+ if serr != nil {
+ // BUGFIX: previously returned the (nil) outer `err`, silently
+ // swallowing the conversion failure and leaking the open Tx.
+ return 0, rollback(tx, serr)
+ }
+ query, args := sql.Update(card.OwnerTable).
+ Set(card.OwnerColumn, eid).
+ Where(sql.EQ(card.FieldID, id).And().IsNull(card.OwnerColumn)).
+ Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return 0, rollback(tx, err)
+ }
+ affected, err := res.RowsAffected()
+ if err != nil {
+ return 0, rollback(tx, err)
+ }
+ if int(affected) < len(cu.owner) {
+ return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"owner\" %v already connected to a different \"Card\"", keys(cu.owner))})
+ }
+ }
+ }
+ if err = tx.Commit(); err != nil {
+ return 0, err
+ }
+ return len(ids), nil
+}
+
+// gremlinSave applies the update on the Gremlin (Neptune) dialect and
+// returns the updated vertices decoded into Cards.
+func (cu *CardUpdate) gremlinSave(ctx context.Context) ([]*Card, error) {
+ res := &gremlin.Response{}
+ query, bindings := cu.gremlin().Query()
+ if err := cu.driver.Exec(ctx, query, bindings, res); err != nil {
+ return nil, err
+ }
+ if err, ok := isConstantError(res); ok {
+ return nil, err
+ }
+ var cs Cards
+ cs.config(cu.config)
+ if err := cs.FromResponse(res); err != nil {
+ return nil, err
+ }
+ return cs, nil
+}
+
+// gremlin builds the update traversal, including uniqueness constraints
+// for the "owner" edge.
+func (cu *CardUpdate) gremlin() *dsl.Traversal {
+ type constraint struct {
+ pred *dsl.Traversal // constraint predicate.
+ test *dsl.Traversal // test matches and its constant.
+ } + constraints := make([]*constraint, 0, 1) + v := g.V().HasLabel(card.Label) + for _, p := range cu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if cu.number != nil { + v.Property(dsl.Single, card.FieldNumber, *cu.number) + } + if cu.clearedOwner { + tr := rv.Clone().InE(user.CardLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range cu.owner { + v.AddE(user.CardLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.CardLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(card.Label, user.CardLabel, id)), + }) + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// CardUpdateOne is the builder for updating a single Card entity. +type CardUpdateOne struct { + config + id string + number *string + owner map[string]struct{} + clearedOwner bool +} + +// SetNumber sets the number field. +func (cuo *CardUpdateOne) SetNumber(s string) *CardUpdateOne { + cuo.number = &s + return cuo +} + +// SetOwnerID sets the owner edge to User by id. +func (cuo *CardUpdateOne) SetOwnerID(id string) *CardUpdateOne { + if cuo.owner == nil { + cuo.owner = make(map[string]struct{}) + } + cuo.owner[id] = struct{}{} + return cuo +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (cuo *CardUpdateOne) SetNillableOwnerID(id *string) *CardUpdateOne { + if id != nil { + cuo = cuo.SetOwnerID(*id) + } + return cuo +} + +// SetOwner sets the owner edge to User. 
+func (cuo *CardUpdateOne) SetOwner(u *User) *CardUpdateOne { + return cuo.SetOwnerID(u.ID) +} + +// ClearOwner clears the owner edge to User. +func (cuo *CardUpdateOne) ClearOwner() *CardUpdateOne { + cuo.clearedOwner = true + return cuo +} + +// Save executes the query and returns the updated entity. +func (cuo *CardUpdateOne) Save(ctx context.Context) (*Card, error) { + if cuo.number != nil { + if err := card.NumberValidator(*cuo.number); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"number\": %v", err) + } + } + if len(cuo.owner) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"owner\"") + } + switch cuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cuo.sqlSave(ctx) + case dialect.Neptune: + return cuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (cuo *CardUpdateOne) SaveX(ctx context.Context) *Card { + c, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return c +} + +// Exec executes the query on the entity. +func (cuo *CardUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cuo *CardUpdateOne) ExecX(ctx context.Context) {
+ if err := cuo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// sqlSave applies the update on the SQL dialects. It selects the entity by
+// its id, mutates fields and the "owner" edge in a transaction, and returns
+// the updated entity.
+func (cuo *CardUpdateOne) sqlSave(ctx context.Context) (c *Card, err error) {
+ selector := sql.Select(card.Columns...).From(sql.Table(card.Table))
+ card.ID(cuo.id).SQL(selector)
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err = cuo.driver.Query(ctx, query, args, rows); err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var ids []int
+ for rows.Next() {
+ var id int
+ c = &Card{config: cuo.config}
+ if err := c.FromRows(rows); err != nil {
+ return nil, fmt.Errorf("ent: failed scanning row into Card: %v", err)
+ }
+ id = c.id()
+ ids = append(ids, id)
+ }
+ switch n := len(ids); {
+ case n == 0:
+ return nil, fmt.Errorf("ent: Card not found with id: %v", cuo.id)
+ case n > 1:
+ return nil, fmt.Errorf("ent: more than one Card with the same id: %v", cuo.id)
+ }
+
+ tx, err := cuo.driver.Tx(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ update bool
+ res sql.Result
+ builder = sql.Update(card.Table).Where(sql.InInts(card.FieldID, ids...))
+ )
+ if cuo.number != nil {
+ update = true
+ builder.Set(card.FieldNumber, *cuo.number)
+ c.Number = *cuo.number
+ }
+ if update {
+ query, args := builder.Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return nil, rollback(tx, err)
+ }
+ }
+ if cuo.clearedOwner {
+ query, args := sql.Update(card.OwnerTable).
+ SetNull(card.OwnerColumn).
+ Where(sql.InInts(user.FieldID, ids...)).
+ Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return nil, rollback(tx, err)
+ }
+ }
+ if len(cuo.owner) > 0 {
+ for _, id := range ids {
+ eid, serr := strconv.Atoi(keys(cuo.owner)[0])
+ if serr != nil {
+ // BUGFIX: previously returned the (nil) outer `err`, silently
+ // swallowing the conversion failure and leaking the open Tx.
+ return nil, rollback(tx, serr)
+ }
+ query, args := sql.Update(card.OwnerTable).
+ Set(card.OwnerColumn, eid).
+ Where(sql.EQ(card.FieldID, id).And().IsNull(card.OwnerColumn)).
+ Query()
+ if err := tx.Exec(ctx, query, args, &res); err != nil {
+ return nil, rollback(tx, err)
+ }
+ affected, err := res.RowsAffected()
+ if err != nil {
+ return nil, rollback(tx, err)
+ }
+ if int(affected) < len(cuo.owner) {
+ return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"owner\" %v already connected to a different \"Card\"", keys(cuo.owner))})
+ }
+ }
+ }
+ if err = tx.Commit(); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+// gremlinSave applies the update on the Gremlin (Neptune) dialect and
+// returns the updated entity decoded from the response.
+func (cuo *CardUpdateOne) gremlinSave(ctx context.Context) (*Card, error) {
+ res := &gremlin.Response{}
+ query, bindings := cuo.gremlin(cuo.id).Query()
+ if err := cuo.driver.Exec(ctx, query, bindings, res); err != nil {
+ return nil, err
+ }
+ if err, ok := isConstantError(res); ok {
+ return nil, err
+ }
+ c := &Card{config: cuo.config}
+ if err := c.FromResponse(res); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+// gremlin builds the update traversal for a single vertex, including
+// uniqueness constraints for the "owner" edge.
+func (cuo *CardUpdateOne) gremlin(id string) *dsl.Traversal {
+ type constraint struct {
+ pred *dsl.Traversal // constraint predicate.
+ test *dsl.Traversal // test matches and its constant.
+ }
+ constraints := make([]*constraint, 0, 1)
+ v := g.V(id)
+ var (
+ rv = v.Clone()
+ trs []*dsl.Traversal
+ )
+ if cuo.number != nil {
+ v.Property(dsl.Single, card.FieldNumber, *cuo.number)
+ }
+ if cuo.clearedOwner {
+ tr := rv.Clone().InE(user.CardLabel).Drop().Iterate()
+ trs = append(trs, tr)
+ }
+ for id := range cuo.owner {
+ v.AddE(user.CardLabel).From(g.V(id)).InV()
+ constraints = append(constraints, &constraint{
+ pred: g.E().HasLabel(user.CardLabel).OutV().HasID(id).Count(),
+ test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(card.Label, user.CardLabel, id)),
+ })
+ }
+ v.ValueMap(true)
+ if len(constraints) > 0 {
+ v = constraints[0].pred.Coalesce(constraints[0].test, v)
+ for _, cr := range constraints[1:] {
+ v = cr.pred.Coalesce(cr.test, v)
+ }
+ }
+ trs = append(trs, v)
+ return dsl.Join(trs...)
+} diff --git a/entc/integration/ent/client.go b/entc/integration/ent/client.go new file mode 100644 index 000000000..1336ee1ee --- /dev/null +++ b/entc/integration/ent/client.go @@ -0,0 +1,849 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "log" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + "fbc/ent/entc/integration/ent/migrate" + "fbc/lib/go/gremlin/graph/dsl/g" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/comment" + "fbc/ent/entc/integration/ent/file" + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + "fbc/ent/entc/integration/ent/node" + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Card is the client for interacting with the Card builders. + Card *CardClient + // Comment is the client for interacting with the Comment builders. + Comment *CommentClient + // File is the client for interacting with the File builders. + File *FileClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // GroupInfo is the client for interacting with the GroupInfo builders. + GroupInfo *GroupInfoClient + // Node is the client for interacting with the Node builders. + Node *NodeClient + // Pet is the client for interacting with the Pet builders. + Pet *PetClient + // User is the client for interacting with the User builders. + User *UserClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + c := config{log: log.Println} + c.options(opts...) 
+ return &Client{ + config: c, + Schema: migrate.NewSchema(c.driver), + Card: NewCardClient(c), + Comment: NewCommentClient(c), + File: NewFileClient(c), + Group: NewGroupClient(c), + GroupInfo: NewGroupInfoClient(c), + Node: NewNodeClient(c), + Pet: NewPetClient(c), + User: NewUserClient(c), + } +} + +// Tx returns a new transactional client. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %v", err) + } + cfg := config{driver: tx, log: c.log, verbose: c.verbose} + return &Tx{ + config: cfg, + Card: NewCardClient(cfg), + Comment: NewCommentClient(cfg), + File: NewFileClient(cfg), + Group: NewGroupClient(cfg), + GroupInfo: NewGroupInfoClient(cfg), + Node: NewNodeClient(cfg), + Pet: NewPetClient(cfg), + User: NewUserClient(cfg), + }, nil +} + +// CardClient is a client for the Card schema. +type CardClient struct { + config +} + +// NewCardClient returns a client for the Card from the given config. +func NewCardClient(c config) *CardClient { + return &CardClient{config: c} +} + +// Create returns a create builder for Card. +func (c *CardClient) Create() *CardCreate { + return &CardCreate{config: c.config} +} + +// Update returns an update builder for Card. +func (c *CardClient) Update() *CardUpdate { + return &CardUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *CardClient) UpdateOne(ca *Card) *CardUpdateOne { + return c.UpdateOneID(ca.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *CardClient) UpdateOneID(id string) *CardUpdateOne { + return &CardUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for Card. 
+func (c *CardClient) Delete() *CardDelete { + return &CardDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *CardClient) DeleteOne(ca *Card) *CardDeleteOne { + return c.DeleteOneID(ca.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *CardClient) DeleteOneID(id string) *CardDeleteOne { + return &CardDeleteOne{c.Delete().Where(card.ID(id))} +} + +// Create returns a query builder for Card. +func (c *CardClient) Query() *CardQuery { + return &CardQuery{config: c.config} +} + +// QueryOwner queries the owner edge of a Card. +func (c *CardClient) QueryOwner(ca *Card) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := ca.id() + t1 := sql.Table(user.Table) + t2 := sql.Select(card.OwnerColumn). + From(sql.Table(card.OwnerTable)). + Where(sql.EQ(card.FieldID, id)) + query.sql = sql.Select().From(t1).Join(t2).On(t1.C(user.FieldID), t2.C(card.OwnerColumn)) + case dialect.Neptune: + query.gremlin = g.V(ca.ID).InE(user.CardLabel).OutV() + } + return query +} + +// CommentClient is a client for the Comment schema. +type CommentClient struct { + config +} + +// NewCommentClient returns a client for the Comment from the given config. +func NewCommentClient(c config) *CommentClient { + return &CommentClient{config: c} +} + +// Create returns a create builder for Comment. +func (c *CommentClient) Create() *CommentCreate { + return &CommentCreate{config: c.config} +} + +// Update returns an update builder for Comment. +func (c *CommentClient) Update() *CommentUpdate { + return &CommentUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *CommentClient) UpdateOne(co *Comment) *CommentUpdateOne { + return c.UpdateOneID(co.ID) +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *CommentClient) UpdateOneID(id string) *CommentUpdateOne { + return &CommentUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for Comment. +func (c *CommentClient) Delete() *CommentDelete { + return &CommentDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *CommentClient) DeleteOne(co *Comment) *CommentDeleteOne { + return c.DeleteOneID(co.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *CommentClient) DeleteOneID(id string) *CommentDeleteOne { + return &CommentDeleteOne{c.Delete().Where(comment.ID(id))} +} + +// Create returns a query builder for Comment. +func (c *CommentClient) Query() *CommentQuery { + return &CommentQuery{config: c.config} +} + +// FileClient is a client for the File schema. +type FileClient struct { + config +} + +// NewFileClient returns a client for the File from the given config. +func NewFileClient(c config) *FileClient { + return &FileClient{config: c} +} + +// Create returns a create builder for File. +func (c *FileClient) Create() *FileCreate { + return &FileCreate{config: c.config} +} + +// Update returns an update builder for File. +func (c *FileClient) Update() *FileUpdate { + return &FileUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *FileClient) UpdateOne(f *File) *FileUpdateOne { + return c.UpdateOneID(f.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *FileClient) UpdateOneID(id string) *FileUpdateOne { + return &FileUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for File. +func (c *FileClient) Delete() *FileDelete { + return &FileDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *FileClient) DeleteOne(f *File) *FileDeleteOne { + return c.DeleteOneID(f.ID) +} + +// DeleteOneID returns a delete builder for the given id. 
+func (c *FileClient) DeleteOneID(id string) *FileDeleteOne {
+ return &FileDeleteOne{c.Delete().Where(file.ID(id))}
+}
+
+// Query returns a query builder for File.
+func (c *FileClient) Query() *FileQuery {
+ return &FileQuery{config: c.config}
+}
+
+// GroupClient is a client for the Group schema.
+type GroupClient struct {
+ config
+}
+
+// NewGroupClient returns a client for the Group from the given config.
+func NewGroupClient(c config) *GroupClient {
+ return &GroupClient{config: c}
+}
+
+// Create returns a create builder for Group.
+func (c *GroupClient) Create() *GroupCreate {
+ return &GroupCreate{config: c.config}
+}
+
+// Update returns an update builder for Group.
+func (c *GroupClient) Update() *GroupUpdate {
+ return &GroupUpdate{config: c.config}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *GroupClient) UpdateOne(gr *Group) *GroupUpdateOne {
+ return c.UpdateOneID(gr.ID)
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *GroupClient) UpdateOneID(id string) *GroupUpdateOne {
+ return &GroupUpdateOne{config: c.config, id: id}
+}
+
+// Delete returns a delete builder for Group.
+func (c *GroupClient) Delete() *GroupDelete {
+ return &GroupDelete{config: c.config}
+}
+
+// DeleteOne returns a delete builder for the given entity.
+func (c *GroupClient) DeleteOne(gr *Group) *GroupDeleteOne {
+ return c.DeleteOneID(gr.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *GroupClient) DeleteOneID(id string) *GroupDeleteOne {
+ return &GroupDeleteOne{c.Delete().Where(group.ID(id))}
+}
+
+// Query returns a query builder for Group.
+func (c *GroupClient) Query() *GroupQuery {
+ return &GroupQuery{config: c.config}
+}
+
+// QueryFiles queries the files edge of a Group. 
+func (c *GroupClient) QueryFiles(gr *Group) *FileQuery { + query := &FileQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := gr.id() + query.sql = sql.Select().From(sql.Table(file.Table)). + Where(sql.EQ(group.FilesColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(gr.ID).OutE(group.FilesLabel).InV() + } + return query +} + +// QueryBlocked queries the blocked edge of a Group. +func (c *GroupClient) QueryBlocked(gr *Group) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := gr.id() + query.sql = sql.Select().From(sql.Table(user.Table)). + Where(sql.EQ(group.BlockedColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(gr.ID).OutE(group.BlockedLabel).InV() + } + return query +} + +// QueryUsers queries the users edge of a Group. +func (c *GroupClient) QueryUsers(gr *Group) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := gr.id() + t1 := sql.Table(user.Table) + t2 := sql.Table(group.Table) + t3 := sql.Table(group.UsersTable) + t4 := sql.Select(t3.C(group.UsersPrimaryKey[0])). + From(t3). + Join(t2). + On(t3.C(group.UsersPrimaryKey[1]), t2.C(group.FieldID)). + Where(sql.EQ(t2.C(group.FieldID), id)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(group.UsersPrimaryKey[0])) + case dialect.Neptune: + query.gremlin = g.V(gr.ID).InE(user.GroupsLabel).OutV() + } + return query +} + +// QueryInfo queries the info edge of a Group. +func (c *GroupClient) QueryInfo(gr *Group) *GroupInfoQuery { + query := &GroupInfoQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := gr.id() + t1 := sql.Table(groupinfo.Table) + t2 := sql.Select(group.InfoColumn). + From(sql.Table(group.InfoTable)). 
+ Where(sql.EQ(group.FieldID, id)) + query.sql = sql.Select().From(t1).Join(t2).On(t1.C(groupinfo.FieldID), t2.C(group.InfoColumn)) + case dialect.Neptune: + query.gremlin = g.V(gr.ID).OutE(group.InfoLabel).InV() + } + return query +} + +// GroupInfoClient is a client for the GroupInfo schema. +type GroupInfoClient struct { + config +} + +// NewGroupInfoClient returns a client for the GroupInfo from the given config. +func NewGroupInfoClient(c config) *GroupInfoClient { + return &GroupInfoClient{config: c} +} + +// Create returns a create builder for GroupInfo. +func (c *GroupInfoClient) Create() *GroupInfoCreate { + return &GroupInfoCreate{config: c.config} +} + +// Update returns an update builder for GroupInfo. +func (c *GroupInfoClient) Update() *GroupInfoUpdate { + return &GroupInfoUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *GroupInfoClient) UpdateOne(gi *GroupInfo) *GroupInfoUpdateOne { + return c.UpdateOneID(gi.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *GroupInfoClient) UpdateOneID(id string) *GroupInfoUpdateOne { + return &GroupInfoUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for GroupInfo. +func (c *GroupInfoClient) Delete() *GroupInfoDelete { + return &GroupInfoDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *GroupInfoClient) DeleteOne(gi *GroupInfo) *GroupInfoDeleteOne { + return c.DeleteOneID(gi.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *GroupInfoClient) DeleteOneID(id string) *GroupInfoDeleteOne { + return &GroupInfoDeleteOne{c.Delete().Where(groupinfo.ID(id))} +} + +// Create returns a query builder for GroupInfo. +func (c *GroupInfoClient) Query() *GroupInfoQuery { + return &GroupInfoQuery{config: c.config} +} + +// QueryGroups queries the groups edge of a GroupInfo. 
+func (c *GroupInfoClient) QueryGroups(gi *GroupInfo) *GroupQuery { + query := &GroupQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := gi.id() + query.sql = sql.Select().From(sql.Table(group.Table)). + Where(sql.EQ(groupinfo.GroupsColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(gi.ID).InE(group.InfoLabel).OutV() + } + return query +} + +// NodeClient is a client for the Node schema. +type NodeClient struct { + config +} + +// NewNodeClient returns a client for the Node from the given config. +func NewNodeClient(c config) *NodeClient { + return &NodeClient{config: c} +} + +// Create returns a create builder for Node. +func (c *NodeClient) Create() *NodeCreate { + return &NodeCreate{config: c.config} +} + +// Update returns an update builder for Node. +func (c *NodeClient) Update() *NodeUpdate { + return &NodeUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *NodeClient) UpdateOne(n *Node) *NodeUpdateOne { + return c.UpdateOneID(n.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *NodeClient) UpdateOneID(id string) *NodeUpdateOne { + return &NodeUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for Node. +func (c *NodeClient) Delete() *NodeDelete { + return &NodeDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *NodeClient) DeleteOne(n *Node) *NodeDeleteOne { + return c.DeleteOneID(n.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *NodeClient) DeleteOneID(id string) *NodeDeleteOne { + return &NodeDeleteOne{c.Delete().Where(node.ID(id))} +} + +// Create returns a query builder for Node. +func (c *NodeClient) Query() *NodeQuery { + return &NodeQuery{config: c.config} +} + +// QueryPrev queries the prev edge of a Node. 
+func (c *NodeClient) QueryPrev(n *Node) *NodeQuery { + query := &NodeQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := n.id() + t1 := sql.Table(node.Table) + t2 := sql.Select(node.PrevColumn). + From(sql.Table(node.PrevTable)). + Where(sql.EQ(node.FieldID, id)) + query.sql = sql.Select().From(t1).Join(t2).On(t1.C(node.FieldID), t2.C(node.PrevColumn)) + case dialect.Neptune: + query.gremlin = g.V(n.ID).InE(node.NextLabel).OutV() + } + return query +} + +// QueryNext queries the next edge of a Node. +func (c *NodeClient) QueryNext(n *Node) *NodeQuery { + query := &NodeQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := n.id() + query.sql = sql.Select().From(sql.Table(node.Table)). + Where(sql.EQ(node.NextColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(n.ID).OutE(node.NextLabel).InV() + } + return query +} + +// PetClient is a client for the Pet schema. +type PetClient struct { + config +} + +// NewPetClient returns a client for the Pet from the given config. +func NewPetClient(c config) *PetClient { + return &PetClient{config: c} +} + +// Create returns a create builder for Pet. +func (c *PetClient) Create() *PetCreate { + return &PetCreate{config: c.config} +} + +// Update returns an update builder for Pet. +func (c *PetClient) Update() *PetUpdate { + return &PetUpdate{config: c.config} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PetClient) UpdateOne(pe *Pet) *PetUpdateOne { + return c.UpdateOneID(pe.ID) +} + +// UpdateOneID returns an update builder for the given id. +func (c *PetClient) UpdateOneID(id string) *PetUpdateOne { + return &PetUpdateOne{config: c.config, id: id} +} + +// Delete returns a delete builder for Pet. +func (c *PetClient) Delete() *PetDelete { + return &PetDelete{config: c.config} +} + +// DeleteOne returns a delete builder for the given entity. 
+func (c *PetClient) DeleteOne(pe *Pet) *PetDeleteOne {
+ return c.DeleteOneID(pe.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *PetClient) DeleteOneID(id string) *PetDeleteOne {
+ return &PetDeleteOne{c.Delete().Where(pet.ID(id))}
+}
+
+// Query returns a query builder for Pet.
+func (c *PetClient) Query() *PetQuery {
+ return &PetQuery{config: c.config}
+}
+
+// QueryTeam queries the team edge of a Pet.
+func (c *PetClient) QueryTeam(pe *Pet) *UserQuery {
+ query := &UserQuery{config: c.config}
+ switch c.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ id := pe.id()
+ t1 := sql.Table(user.Table)
+ t2 := sql.Select(pet.TeamColumn).
+ From(sql.Table(pet.TeamTable)).
+ Where(sql.EQ(pet.FieldID, id))
+ query.sql = sql.Select().From(t1).Join(t2).On(t1.C(user.FieldID), t2.C(pet.TeamColumn))
+ case dialect.Neptune:
+ query.gremlin = g.V(pe.ID).InE(user.TeamLabel).OutV()
+ }
+ return query
+}
+
+// QueryOwner queries the owner edge of a Pet.
+func (c *PetClient) QueryOwner(pe *Pet) *UserQuery {
+ query := &UserQuery{config: c.config}
+ switch c.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ id := pe.id()
+ t1 := sql.Table(user.Table)
+ t2 := sql.Select(pet.OwnerColumn).
+ From(sql.Table(pet.OwnerTable)).
+ Where(sql.EQ(pet.FieldID, id))
+ query.sql = sql.Select().From(t1).Join(t2).On(t1.C(user.FieldID), t2.C(pet.OwnerColumn))
+ case dialect.Neptune:
+ query.gremlin = g.V(pe.ID).InE(user.PetsLabel).OutV()
+ }
+ return query
+}
+
+// UserClient is a client for the User schema.
+type UserClient struct {
+ config
+}
+
+// NewUserClient returns a client for the User from the given config.
+func NewUserClient(c config) *UserClient {
+ return &UserClient{config: c}
+}
+
+// Create returns a create builder for User.
+func (c *UserClient) Create() *UserCreate {
+ return &UserCreate{config: c.config}
+}
+
+// Update returns an update builder for User. 
+func (c *UserClient) Update() *UserUpdate {
+ return &UserUpdate{config: c.config}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *UserClient) UpdateOne(u *User) *UserUpdateOne {
+ return c.UpdateOneID(u.ID)
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *UserClient) UpdateOneID(id string) *UserUpdateOne {
+ return &UserUpdateOne{config: c.config, id: id}
+}
+
+// Delete returns a delete builder for User.
+func (c *UserClient) Delete() *UserDelete {
+ return &UserDelete{config: c.config}
+}
+
+// DeleteOne returns a delete builder for the given entity.
+func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
+ return c.DeleteOneID(u.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *UserClient) DeleteOneID(id string) *UserDeleteOne {
+ return &UserDeleteOne{c.Delete().Where(user.ID(id))}
+}
+
+// Query returns a query builder for User.
+func (c *UserClient) Query() *UserQuery {
+ return &UserQuery{config: c.config}
+}
+
+// QueryCard queries the card edge of a User.
+func (c *UserClient) QueryCard(u *User) *CardQuery {
+ query := &CardQuery{config: c.config}
+ switch c.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ id := u.id()
+ query.sql = sql.Select().From(sql.Table(card.Table)).
+ Where(sql.EQ(user.CardColumn, id))
+ case dialect.Neptune:
+ query.gremlin = g.V(u.ID).OutE(user.CardLabel).InV()
+ }
+ return query
+}
+
+// QueryPets queries the pets edge of a User.
+func (c *UserClient) QueryPets(u *User) *PetQuery {
+ query := &PetQuery{config: c.config}
+ switch c.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ id := u.id()
+ query.sql = sql.Select().From(sql.Table(pet.Table)).
+ Where(sql.EQ(user.PetsColumn, id))
+ case dialect.Neptune:
+ query.gremlin = g.V(u.ID).OutE(user.PetsLabel).InV()
+ }
+ return query
+}
+
+// QueryFiles queries the files edge of a User. 
+func (c *UserClient) QueryFiles(u *User) *FileQuery { + query := &FileQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + query.sql = sql.Select().From(sql.Table(file.Table)). + Where(sql.EQ(user.FilesColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(u.ID).OutE(user.FilesLabel).InV() + } + return query +} + +// QueryGroups queries the groups edge of a User. +func (c *UserClient) QueryGroups(u *User) *GroupQuery { + query := &GroupQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + t1 := sql.Table(group.Table) + t2 := sql.Table(user.Table) + t3 := sql.Table(user.GroupsTable) + t4 := sql.Select(t3.C(user.GroupsPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.GroupsPrimaryKey[0]), t2.C(user.FieldID)). + Where(sql.EQ(t2.C(user.FieldID), id)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(group.FieldID), t4.C(user.GroupsPrimaryKey[1])) + case dialect.Neptune: + query.gremlin = g.V(u.ID).OutE(user.GroupsLabel).InV() + } + return query +} + +// QueryFriends queries the friends edge of a User. +func (c *UserClient) QueryFriends(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + t1 := sql.Table(user.Table) + t2 := sql.Table(user.Table) + t3 := sql.Table(user.FriendsTable) + t4 := sql.Select(t3.C(user.FriendsPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.FriendsPrimaryKey[0]), t2.C(user.FieldID)). + Where(sql.EQ(t2.C(user.FieldID), id)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FriendsPrimaryKey[1])) + case dialect.Neptune: + query.gremlin = g.V(u.ID).Both(user.FriendsLabel) + } + return query +} + +// QueryFollowers queries the followers edge of a User. 
+func (c *UserClient) QueryFollowers(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + t1 := sql.Table(user.Table) + t2 := sql.Table(user.Table) + t3 := sql.Table(user.FollowersTable) + t4 := sql.Select(t3.C(user.FollowersPrimaryKey[0])). + From(t3). + Join(t2). + On(t3.C(user.FollowersPrimaryKey[1]), t2.C(user.FieldID)). + Where(sql.EQ(t2.C(user.FieldID), id)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FollowersPrimaryKey[0])) + case dialect.Neptune: + query.gremlin = g.V(u.ID).InE(user.FollowingLabel).OutV() + } + return query +} + +// QueryFollowing queries the following edge of a User. +func (c *UserClient) QueryFollowing(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + t1 := sql.Table(user.Table) + t2 := sql.Table(user.Table) + t3 := sql.Table(user.FollowingTable) + t4 := sql.Select(t3.C(user.FollowingPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.FollowingPrimaryKey[0]), t2.C(user.FieldID)). + Where(sql.EQ(t2.C(user.FieldID), id)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FollowingPrimaryKey[1])) + case dialect.Neptune: + query.gremlin = g.V(u.ID).OutE(user.FollowingLabel).InV() + } + return query +} + +// QueryTeam queries the team edge of a User. +func (c *UserClient) QueryTeam(u *User) *PetQuery { + query := &PetQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + query.sql = sql.Select().From(sql.Table(pet.Table)). + Where(sql.EQ(user.TeamColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(u.ID).OutE(user.TeamLabel).InV() + } + return query +} + +// QuerySpouse queries the spouse edge of a User. 
+func (c *UserClient) QuerySpouse(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + query.sql = sql.Select().From(sql.Table(user.Table)). + Where(sql.EQ(user.SpouseColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(u.ID).Both(user.SpouseLabel) + } + return query +} + +// QueryChildren queries the children edge of a User. +func (c *UserClient) QueryChildren(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + query.sql = sql.Select().From(sql.Table(user.Table)). + Where(sql.EQ(user.ChildrenColumn, id)) + case dialect.Neptune: + query.gremlin = g.V(u.ID).InE(user.ParentLabel).OutV() + } + return query +} + +// QueryParent queries the parent edge of a User. +func (c *UserClient) QueryParent(u *User) *UserQuery { + query := &UserQuery{config: c.config} + switch c.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + id := u.id() + t1 := sql.Table(user.Table) + t2 := sql.Select(user.ParentColumn). + From(sql.Table(user.ParentTable)). + Where(sql.EQ(user.FieldID, id)) + query.sql = sql.Select().From(t1).Join(t2).On(t1.C(user.FieldID), t2.C(user.ParentColumn)) + case dialect.Neptune: + query.gremlin = g.V(u.ID).OutE(user.ParentLabel).InV() + } + return query +} diff --git a/entc/integration/ent/comment.go b/entc/integration/ent/comment.go new file mode 100644 index 000000000..601a4afb3 --- /dev/null +++ b/entc/integration/ent/comment.go @@ -0,0 +1,125 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Comment is the model entity for the Comment schema. +type Comment struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` +} + +// FromResponse scans the gremlin response data into Comment. 
+func (c *Comment) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vc struct { + ID string `json:"id,omitempty"` + } + if err := vmap.Decode(&vc); err != nil { + return err + } + c.ID = vc.ID + return nil +} + +// FromRows scans the sql response data into Comment. +func (c *Comment) FromRows(rows *sql.Rows) error { + var vc struct { + ID int + } + // the order here should be the same as in the `comment.Columns`. + if err := rows.Scan( + &vc.ID, + ); err != nil { + return err + } + c.ID = strconv.Itoa(vc.ID) + return nil +} + +// Update returns a builder for updating this Comment. +// Note that, you need to call Comment.Unwrap() before calling this method, if this Comment +// was returned from a transaction, and the transaction was committed or rolled back. +func (c *Comment) Update() *CommentUpdateOne { + return (&CommentClient{c.config}).UpdateOne(c) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (c *Comment) Unwrap() *Comment { + tx, ok := c.config.driver.(*txDriver) + if !ok { + panic("ent: Comment is not a transactional entity") + } + c.config.driver = tx.drv + return c +} + +// String implements the fmt.Stringer. +func (c *Comment) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Comment(") + buf.WriteString(fmt.Sprintf("id=%v,", c.ID)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (c *Comment) id() int { + id, _ := strconv.Atoi(c.ID) + return id +} + +// Comments is a parsable slice of Comment. +type Comments []*Comment + +// FromResponse scans the gremlin response data into Comments. 
+func (c *Comments) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vc []struct { + ID string `json:"id,omitempty"` + } + if err := vmap.Decode(&vc); err != nil { + return err + } + for _, v := range vc { + *c = append(*c, &Comment{ + ID: v.ID, + }) + } + return nil +} + +// FromRows scans the sql response data into Comments. +func (c *Comments) FromRows(rows *sql.Rows) error { + for rows.Next() { + vc := &Comment{} + if err := vc.FromRows(rows); err != nil { + return err + } + *c = append(*c, vc) + } + return nil +} + +func (c Comments) config(cfg config) { + for i := range c { + c[i].config = cfg + } +} diff --git a/entc/integration/ent/comment/comment.go b/entc/integration/ent/comment/comment.go new file mode 100644 index 000000000..f49f317ca --- /dev/null +++ b/entc/integration/ent/comment/comment.go @@ -0,0 +1,17 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package comment + +const ( + // Label holds the string label denoting the comment type in the database. + Label = "comment" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the comment in the database. + Table = "comments" +) + +// Columns holds all SQL columns are comment fields. +var Columns = []string{ + FieldID, +} diff --git a/entc/integration/ent/comment/where.go b/entc/integration/ent/comment/where.go new file mode 100644 index 000000000..a40e23284 --- /dev/null +++ b/entc/integration/ent/comment/where.go @@ -0,0 +1,156 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package comment + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. 
+func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...string) ent.Predicate {
+ return ent.Predicate{
+ SQL: func(s *sql.Selector) {
+ // if no arguments were provided, append the FALSE constant,
+ // since we can't apply "IN ()". This will make this predicate falsy.
+ if len(ids) == 0 {
+ s.Where(sql.False())
+ return
+ }
+ v := make([]interface{}, len(ids))
+ for i := range v {
+ v[i], _ = strconv.Atoi(ids[i])
+ }
+ s.Where(sql.In(s.C(FieldID), v...))
+ },
+ Gremlin: func(t *dsl.Traversal) {
+ v := make([]interface{}, len(ids))
+ for i := range v {
+ v[i] = ids[i]
+ }
+ t.HasID(p.Within(v...))
+ },
+ }
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...string) ent.Predicate {
+ return ent.Predicate{
+ SQL: func(s *sql.Selector) {
+ // if no arguments were provided, append the FALSE constant,
+ // since we can't apply "IN ()". This will make this predicate falsy.
+ if len(ids) == 0 {
+ s.Where(sql.False())
+ return
+ }
+ v := make([]interface{}, len(ids))
+ for i := range v {
+ v[i], _ = strconv.Atoi(ids[i])
+ }
+ s.Where(sql.NotIn(s.C(FieldID), v...))
+ },
+ Gremlin: func(t *dsl.Traversal) {
+ v := make([]interface{}, len(ids))
+ for i := range v {
+ v[i] = ids[i]
+ }
+ t.HasID(p.Without(v...))
+ },
+ }
+}
diff --git a/entc/integration/ent/comment_create.go b/entc/integration/ent/comment_create.go
new file mode 100644
index 000000000..09f466c4f
--- /dev/null
+++ b/entc/integration/ent/comment_create.go
@@ -0,0 +1,90 @@
+// Code generated (@generated) by entc, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "strconv"
+
+ "fbc/ent/entc/integration/ent/comment"
+
+ "fbc/ent/dialect"
+ "fbc/ent/dialect/sql"
+
+ "fbc/lib/go/gremlin"
+ "fbc/lib/go/gremlin/graph/dsl"
+ "fbc/lib/go/gremlin/graph/dsl/g"
+)
+
+// CommentCreate is the builder for creating a Comment entity.
+type CommentCreate struct {
+ config
+}
+
+// Save creates the Comment in the database. 
+func (cc *CommentCreate) Save(ctx context.Context) (*Comment, error) { + switch cc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cc.sqlSave(ctx) + case dialect.Neptune: + return cc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. +func (cc *CommentCreate) SaveX(ctx context.Context) *Comment { + v, err := cc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (cc *CommentCreate) sqlSave(ctx context.Context) (*Comment, error) { + var ( + res sql.Result + c = &Comment{config: cc.config} + ) + tx, err := cc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(comment.Table).Default(cc.driver.Dialect()) + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + c.ID = strconv.FormatInt(id, 10) + if err := tx.Commit(); err != nil { + return nil, err + } + return c, nil +} + +func (cc *CommentCreate) gremlinSave(ctx context.Context) (*Comment, error) { + res := &gremlin.Response{} + query, bindings := cc.gremlin().Query() + if err := cc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + c := &Comment{config: cc.config} + if err := c.FromResponse(res); err != nil { + return nil, err + } + return c, nil +} + +func (cc *CommentCreate) gremlin() *dsl.Traversal { + v := g.AddV(comment.Label) + return v.ValueMap(true) +} diff --git a/entc/integration/ent/comment_delete.go b/entc/integration/ent/comment_delete.go new file mode 100644 index 000000000..bb21d16d3 --- /dev/null +++ b/entc/integration/ent/comment_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/comment" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// CommentDelete is the builder for deleting a Comment entity. +type CommentDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (cd *CommentDelete) Where(ps ...ent.Predicate) *CommentDelete { + cd.predicates = append(cd.predicates, ps...) + return cd +} + +// Exec executes the deletion query. +func (cd *CommentDelete) Exec(ctx context.Context) error { + switch cd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cd.sqlExec(ctx) + case dialect.Neptune: + return cd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (cd *CommentDelete) ExecX(ctx context.Context) { + if err := cd.Exec(ctx); err != nil { + panic(err) + } +} + +func (cd *CommentDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(comment.Table)) + for _, p := range cd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(comment.Table).FromSelect(selector).Query() + return cd.driver.Exec(ctx, query, args, &res) +} + +func (cd *CommentDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := cd.gremlin().Query() + return cd.driver.Exec(ctx, query, bindings, res) +} + +func (cd *CommentDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(comment.Label) + for _, p := range cd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// CommentDeleteOne is the builder for deleting a single Comment entity. +type CommentDeleteOne struct { + cd *CommentDelete +} + +// Exec executes the deletion query. 
+func (cdo *CommentDeleteOne) Exec(ctx context.Context) error { + return cdo.cd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (cdo *CommentDeleteOne) ExecX(ctx context.Context) { + cdo.cd.ExecX(ctx) +} diff --git a/entc/integration/ent/comment_query.go b/entc/integration/ent/comment_query.go new file mode 100644 index 000000000..a8b5361f1 --- /dev/null +++ b/entc/integration/ent/comment_query.go @@ -0,0 +1,585 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/comment" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// CommentQuery is the builder for querying Comment entities. +type CommentQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (cq *CommentQuery) Where(ps ...ent.Predicate) *CommentQuery { + cq.predicates = append(cq.predicates, ps...) + return cq +} + +// Limit adds a limit step to the query. +func (cq *CommentQuery) Limit(limit int) *CommentQuery { + cq.limit = &limit + return cq +} + +// Order adds an order step to the query. +func (cq *CommentQuery) Order(o ...Order) *CommentQuery { + cq.order = append(cq.order, o...) + return cq +} + +// Get returns a Comment entity by its id. +func (cq *CommentQuery) Get(ctx context.Context, id string) (*Comment, error) { + return cq.Where(comment.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (cq *CommentQuery) GetX(ctx context.Context, id string) *Comment { + c, err := cq.Get(ctx, id) + if err != nil { + panic(err) + } + return c +} + +// First returns the first Comment entity in the query. 
Returns *ErrNotFound when no comment was found. +func (cq *CommentQuery) First(ctx context.Context) (*Comment, error) { + cs, err := cq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(cs) == 0 { + return nil, &ErrNotFound{comment.Label} + } + return cs[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cq *CommentQuery) FirstX(ctx context.Context) *Comment { + c, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return c +} + +// FirstID returns the first Comment id in the query. Returns *ErrNotFound when no id was found. +func (cq *CommentQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{comment.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (cq *CommentQuery) FirstXID(ctx context.Context) string { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Comment entity in the query, returns an error if not exactly one entity was returned. +func (cq *CommentQuery) Only(ctx context.Context) (*Comment, error) { + cs, err := cq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(cs) { + case 1: + return cs[0], nil + case 0: + return nil, &ErrNotFound{comment.Label} + default: + return nil, &ErrNotSingular{comment.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *CommentQuery) OnlyX(ctx context.Context) *Comment { + c, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return c +} + +// OnlyID returns the only Comment id in the query, returns an error if not exactly one id was returned. 
+func (cq *CommentQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{comment.Label} + default: + err = &ErrNotSingular{comment.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (cq *CommentQuery) OnlyXID(ctx context.Context) string { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Comments. +func (cq *CommentQuery) All(ctx context.Context) ([]*Comment, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlAll(ctx) + case dialect.Neptune: + return cq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (cq *CommentQuery) AllX(ctx context.Context) []*Comment { + cs, err := cq.All(ctx) + if err != nil { + panic(err) + } + return cs +} + +// IDs executes the query and returns a list of Comment ids. +func (cq *CommentQuery) IDs(ctx context.Context) ([]string, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlIDs(ctx) + case dialect.Neptune: + return cq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (cq *CommentQuery) IDsX(ctx context.Context) []string { + ids, err := cq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cq *CommentQuery) Count(ctx context.Context) (int, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlCount(ctx) + case dialect.Neptune: + return cq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. 
+func (cq *CommentQuery) CountX(ctx context.Context) int { + count, err := cq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cq *CommentQuery) Exist(ctx context.Context) (bool, error) { + switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cq.sqlExist(ctx) + case dialect.Neptune: + return cq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cq *CommentQuery) ExistX(ctx context.Context) bool { + exist, err := cq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +func (cq *CommentQuery) GroupBy(field string, fields ...string) *CommentGroupBy { + group := &CommentGroupBy{config: cq.config} + group.fields = append([]string{field}, fields...) 
+ switch cq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = cq.sqlQuery() + case dialect.Neptune: + group.gremlin = cq.gremlinQuery() + } + return group +} + +func (cq *CommentQuery) sqlAll(ctx context.Context) ([]*Comment, error) { + rows := &sql.Rows{} + selector := cq.sqlQuery() + if unique := cq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := cq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var cs Comments + if err := cs.FromRows(rows); err != nil { + return nil, err + } + cs.config(cq.config) + return cs, nil +} + +func (cq *CommentQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := cq.sqlQuery() + unique := []string{comment.FieldID} + if len(cq.unique) > 0 { + unique = cq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := cq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (cq *CommentQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := cq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (cq *CommentQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := cq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (cq *CommentQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(comment.Table) + selector := sql.Select(t1.Columns(comment.Columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(comment.Columns...)...) 
+ } + for _, p := range cq.predicates { + p.SQL(selector) + } + for _, p := range cq.order { + p.SQL(selector) + } + if limit := cq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (cq *CommentQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (cq *CommentQuery) gremlinAll(ctx context.Context) ([]*Comment, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().ValueMap(true).Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var cs Comments + if err := cs.FromResponse(res); err != nil { + return nil, err + } + cs.config(cq.config) + return cs, nil +} + +func (cq *CommentQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().Count().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (cq *CommentQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := cq.gremlinQuery().HasNext().Query() + if err := cq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (cq *CommentQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(comment.Label) + if cq.gremlin != nil { + v = cq.gremlin.Clone() + } + for _, p := range cq.predicates { + p.Gremlin(v) + } + if len(cq.order) > 0 { + v.Order() + for _, p := range cq.order { + p.Gremlin(v) + } + } + if limit := cq.limit; limit != nil { + v.Limit(*limit) + } + if unique := 
cq.unique; len(unique) == 0 {
+ v.Dedup()
+ }
+ return v
+}
+
+// CommentGroupBy is the builder for group-by Comment entities.
+type CommentGroupBy struct {
+ config
+ fields []string
+ fns []Aggregate
+ // intermediate queries.
+ sql *sql.Selector
+ gremlin *dsl.Traversal
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (cgb *CommentGroupBy) Aggregate(fns ...Aggregate) *CommentGroupBy {
+ cgb.fns = append(cgb.fns, fns...)
+ return cgb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (cgb *CommentGroupBy) Scan(ctx context.Context, v interface{}) error {
+ switch cgb.driver.Dialect() {
+ case dialect.MySQL, dialect.SQLite:
+ return cgb.sqlScan(ctx, v)
+ case dialect.Neptune:
+ return cgb.gremlinScan(ctx, v)
+ default:
+ return errors.New("cgb: unsupported dialect")
+ }
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (cgb *CommentGroupBy) ScanX(ctx context.Context, v interface{}) {
+ if err := cgb.Scan(ctx, v); err != nil {
+ panic(err)
+ }
+}
+
+// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field.
+func (cgb *CommentGroupBy) Strings(ctx context.Context) ([]string, error) {
+ if len(cgb.fields) > 1 {
+ return nil, errors.New("ent: CommentGroupBy.Strings is not achievable when grouping more than 1 field")
+ }
+ var v []string
+ if err := cgb.Scan(ctx, &v); err != nil {
+ return nil, err
+ }
+ return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (cgb *CommentGroupBy) StringsX(ctx context.Context) []string {
+ v, err := cgb.Strings(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (cgb *CommentGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CommentGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (cgb *CommentGroupBy) IntsX(ctx context.Context) []int { + v, err := cgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (cgb *CommentGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CommentGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (cgb *CommentGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := cgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (cgb *CommentGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(cgb.fields) > 1 { + return nil, errors.New("ent: CommentGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := cgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (cgb *CommentGroupBy) BoolsX(ctx context.Context) []bool { + v, err := cgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (cgb *CommentGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := cgb.sqlQuery().Query() + if err := cgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (cgb *CommentGroupBy) sqlQuery() *sql.Selector { + selector := cgb.sql + columns := make([]string, 0, len(cgb.fields)+len(cgb.fns)) + columns = append(columns, cgb.fields...) + for _, fn := range cgb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(cgb.fields...) +} + +func (cgb *CommentGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := cgb.gremlinQuery().Query() + if err := cgb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(cgb.fields)+len(cgb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (cgb *CommentGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range cgb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range cgb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return cgb.gremlin.Group(). + By(__.Values(cgb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/comment_update.go b/entc/integration/ent/comment_update.go new file mode 100644 index 000000000..f896a9c8f --- /dev/null +++ b/entc/integration/ent/comment_update.go @@ -0,0 +1,231 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/comment" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// CommentUpdate is the builder for updating Comment entities. +type CommentUpdate struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (cu *CommentUpdate) Where(ps ...ent.Predicate) *CommentUpdate { + cu.predicates = append(cu.predicates, ps...) + return cu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (cu *CommentUpdate) Save(ctx context.Context) (int, error) { + switch cu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := cu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (cu *CommentUpdate) SaveX(ctx context.Context) int { + affected, err := cu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cu *CommentUpdate) Exec(ctx context.Context) error { + _, err := cu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cu *CommentUpdate) ExecX(ctx context.Context) { + if err := cu.Exec(ctx); err != nil { + panic(err) + } +} + +func (cu *CommentUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(comment.FieldID).From(sql.Table(comment.Table)) + for _, p := range cu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = cu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := cu.driver.Tx(ctx) + if err != nil { + return 0, err + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (cu *CommentUpdate) gremlinSave(ctx context.Context) ([]*Comment, error) { + res := &gremlin.Response{} + query, bindings := cu.gremlin().Query() + if err := cu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var cs Comments + cs.config(cu.config) + if err := cs.FromResponse(res); err != nil { + return nil, err + } + return cs, nil +} + +func (cu *CommentUpdate) gremlin() *dsl.Traversal { + v := g.V().HasLabel(comment.Label) + for _, p := range cu.predicates { + p.Gremlin(v) + } + var ( + trs []*dsl.Traversal + ) + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} + +// CommentUpdateOne is the builder for updating a single Comment entity. +type CommentUpdateOne struct { + config + id string +} + +// Save executes the query and returns the updated entity. 
+func (cuo *CommentUpdateOne) Save(ctx context.Context) (*Comment, error) { + switch cuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return cuo.sqlSave(ctx) + case dialect.Neptune: + return cuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (cuo *CommentUpdateOne) SaveX(ctx context.Context) *Comment { + c, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return c +} + +// Exec executes the query on the entity. +func (cuo *CommentUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cuo *CommentUpdateOne) ExecX(ctx context.Context) { + if err := cuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (cuo *CommentUpdateOne) sqlSave(ctx context.Context) (c *Comment, err error) { + selector := sql.Select(comment.Columns...).From(sql.Table(comment.Table)) + comment.ID(cuo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = cuo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + c = &Comment{config: cuo.config} + if err := c.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into Comment: %v", err) + } + id = c.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: Comment not found with id: %v", cuo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one Comment with the same id: %v", cuo.id) + } + + tx, err := cuo.driver.Tx(ctx) + if err != nil { + return nil, err + } + if err = tx.Commit(); err != nil { + return nil, err + } + return c, nil +} + +func (cuo *CommentUpdateOne) gremlinSave(ctx context.Context) (*Comment, error) { + res := &gremlin.Response{} + query, bindings := cuo.gremlin(cuo.id).Query() + if err := cuo.driver.Exec(ctx, 
query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + c := &Comment{config: cuo.config} + if err := c.FromResponse(res); err != nil { + return nil, err + } + return c, nil +} + +func (cuo *CommentUpdateOne) gremlin(id string) *dsl.Traversal { + v := g.V(id) + var ( + trs []*dsl.Traversal + ) + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/config.go b/entc/integration/ent/config.go new file mode 100644 index 000000000..53f33835c --- /dev/null +++ b/entc/integration/ent/config.go @@ -0,0 +1,51 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "fbc/ent/dialect" +) + +// Option function to configure the client. +type Option func(*config) + +// Config is the configuration for the client and its builder. +type config struct { + // driver is the driver used for execute database requests. + driver dialect.Driver + // verbose enable a verbosity logging. + verbose bool + // log used for logging on verbose mode. + log func(...interface{}) +} + +// Options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.verbose { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Verbose sets the client logging to verbose. +func Verbose() Option { + return func(c *config) { + c.verbose = true + } +} + +// Log sets the client logging to verbose. +func Log(fn func(...interface{})) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} diff --git a/entc/integration/ent/ent.go b/entc/integration/ent/ent.go new file mode 100644 index 000000000..87b29d7c1 --- /dev/null +++ b/entc/integration/ent/ent.go @@ -0,0 +1,349 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "fmt" + "strconv" + "strings" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/encoding/graphson" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" +) + +// Predicate is an alias to ent.Predicate. +type Predicate = ent.Predicate + +// Or groups list of predicates with the or operator between them. +func Or(predicates ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + for i, p := range predicates { + if i > 0 { + s.Or() + } + p.SQL(s) + } + }, + Gremlin: func(tr *dsl.Traversal) { + trs := make([]interface{}, 0, len(predicates)) + for _, p := range predicates { + t := __.New() + p.Gremlin(t) + trs = append(trs, t) + } + tr.Where(__.Or(trs...)) + }, + } +} + +// Not applies the not operator on the given predicate. +func Not(p ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + p.SQL(s.Not()) + }, + Gremlin: func(tr *dsl.Traversal) { + t := __.New() + p.Gremlin(t) + tr.Where(__.Not(t)) + }, + } +} + +// Order applies an ordering on the traversal. +type Order ent.Predicate + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) Order { + return Order{ + SQL: func(s *sql.Selector) { + for _, f := range fields { + s.OrderBy(sql.Asc(f)) + } + }, + Gremlin: func(tr *dsl.Traversal) { + for _, f := range fields { + tr.By(f, dsl.Incr) + } + }, + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) Order { + return Order{ + SQL: func(s *sql.Selector) { + for _, f := range fields { + s.OrderBy(sql.Desc(f)) + } + }, + Gremlin: func(tr *dsl.Traversal) { + for _, f := range fields { + tr.By(f, dsl.Decr) + } + }, + } +} + +// Aggregate applies an aggregation step on the group-by traversal/selector. +type Aggregate struct { + // SQL the column wrapped with the aggregation function. 
+ SQL func(*sql.Selector) string + // Gremlin gets two labels as parameters. The first used in the `As` step for the predicate, + // and the second is an optional name for the next predicates (or for later usage). + Gremlin func(string, string) (string, *dsl.Traversal) +} + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +// +func As(fn Aggregate, end string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.As(fn.SQL(s), end) + }, + Gremlin: func(start, _ string) (string, *dsl.Traversal) { + return fn.Gremlin(start, end) + }, + } +} + +// DefaultCountLabel is the default label name for the Count aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "count" 2 or more fields and avoid conflicting, use the `ent.As(ent.Count(field), "custom_name")` +// function with custom name in order to override it. +const DefaultCountLabel = "count" + +// Count applies the "count" aggregation function on each group. +func Count() Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Count("*") + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultCountLabel + } + return end, __.As(start).Count(dsl.Local).As(end) + }, + } +} + +// DefaultMaxLabel is the default label name for the Max aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "max" 2 or more fields and avoid conflicting, use the `ent.As(ent.Max(field), "custom_name")` +// function with custom name in order to override it. 
+const DefaultMaxLabel = "max" + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Max(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMaxLabel + } + return end, __.As(start).Unfold().Values(field).Max().As(end) + }, + } +} + +// DefaultMeanLabel is the default label name for the Mean aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "mean" 2 or more fields and avoid conflicting, use the `ent.As(ent.Mean(field), "custom_name")` +// function with custom name in order to override it. +const DefaultMeanLabel = "mean" + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Avg(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMeanLabel + } + return end, __.As(start).Unfold().Values(field).Mean().As(end) + }, + } +} + +// DefaultMinLabel is the default label name for the Min aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "min" 2 or more fields and avoid conflicting, use the `ent.As(ent.Min(field), "custom_name")` +// function with custom name in order to override it. +const DefaultMinLabel = "min" + +// Min applies the "min" aggregation function on the given field of each group. 
+func Min(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Min(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMinLabel + } + return end, __.As(start).Unfold().Values(field).Min().As(end) + }, + } +} + +// DefaultSumLabel is the default label name for the Sum aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "sum" 2 or more fields and avoid conflicting, use the `ent.As(ent.Sum(field), "custom_name")` +// function with custom name in order to override it. +const DefaultSumLabel = "sum" + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Sum(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultSumLabel + } + return end, __.As(start).Unfold().Values(field).Sum().As(end) + }, + } +} + +// ErrNotFound returns when trying to fetch a specific entity and it was not found in the database. +type ErrNotFound struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotFound) Error() string { + return fmt.Sprintf("ent: %s not found", e.label) +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + _, ok := err.(*ErrNotFound) + return ok +} + +// ErrNotSingular returns when trying to fetch a singular entity and more then one was found in the database. +type ErrNotSingular struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotSingular) Error() string { + return fmt.Sprintf("ent: %s not singular", e.label) +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. 
+func IsNotSingular(err error) bool { + _, ok := err.(*ErrNotSingular) + return ok +} + +// ErrConstraintFailed returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or field uniqueness. +type ErrConstraintFailed struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ErrConstraintFailed) Error() string { + return fmt.Sprintf("ent: unique constraint failed: %s", e.msg) +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ErrConstraintFailed) Unwrap() error { + return e.wrap +} + +// Code implements the dsl.Node interface. +func (e ErrConstraintFailed) Code() (string, []interface{}) { + return strconv.Quote(e.prefix() + e.msg), nil +} + +func (e *ErrConstraintFailed) UnmarshalGraphson(b []byte) error { + var v [1]*string + if err := graphson.Unmarshal(b, &v); err != nil { + return err + } + if v[0] == nil { + return fmt.Errorf("ent: missing string value") + } + if !strings.HasPrefix(*v[0], e.prefix()) { + return fmt.Errorf("ent: invalid string for error: %s", *v[0]) + } + e.msg = strings.TrimPrefix(*v[0], e.prefix()) + return nil +} + +// prefix returns the prefix used for gremlin constants. +func (ErrConstraintFailed) prefix() string { return "Error: " } + +// NewErrUniqueField creates a constraint error for unique fields. +func NewErrUniqueField(label, field string, v interface{}) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("field %s.%s with value: %#v", label, field, v)} +} + +// NewErrUniqueEdge creates a constraint error for unique edges. +func NewErrUniqueEdge(label, edge, id string) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("edge %s.%s with id: %#v", label, edge, id)} +} + +// IsConstraintFailure returns a boolean indicating whether the error is a constraint failure. 
+func IsConstraintFailure(err error) bool { + _, ok := err.(*ErrConstraintFailed) + return ok +} + +// isConstantError indicates if the given response holds a gremlin constant containing an error. +func isConstantError(r *gremlin.Response) (*ErrConstraintFailed, bool) { + e := &ErrConstraintFailed{} + if err := graphson.Unmarshal(r.Result.Data, e); err != nil { + return nil, false + } + return e, true +} + +func isSQLConstraintError(err error) (*ErrConstraintFailed, bool) { + // Error number 1062 is ER_DUP_ENTRY in mysql, and "UNIQUE constraint failed" is SQLite prefix. + if msg := err.Error(); strings.HasPrefix(msg, "Error 1062") || strings.HasPrefix(msg, "UNIQUE constraint failed") { + return &ErrConstraintFailed{msg, err}, true + } + return nil, false +} + +// rollback calls to tx.Rollback and wraps the given error with the rollback error if occurred. +func rollback(tx dialect.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%s: %v", err.Error(), rerr) + } + if err, ok := isSQLConstraintError(err); ok { + return err + } + return err +} + +// keys returns the keys/ids from the edge map. +func keys(m map[string]struct{}) []string { + s := make([]string, 0, len(m)) + for id, _ := range m { + s = append(s, id) + } + return s +} diff --git a/entc/integration/ent/example_test.go b/entc/integration/ent/example_test.go new file mode 100644 index 000000000..1c282a4d5 --- /dev/null +++ b/entc/integration/ent/example_test.go @@ -0,0 +1,411 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "log" + "net/url" + "os" + "time" + + "fbc/ent/dialect" + "fbc/lib/go/gremlin" +) + +// endpoint for the database. 
In order to run the tests locally, run the following command: +// +// ENT_INTEGRATION_ENDPOINT="http://localhost:8182" go test -v +// +var endpoint *gremlin.Endpoint + +func init() { + if e, ok := os.LookupEnv("ENT_INTEGRATION_ENDPOINT"); ok { + if u, err := url.Parse(e); err == nil { + endpoint = &gremlin.Endpoint{u} + } + } +} + +func ExampleCard() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the card's edges. + + // create card vertex with its edges. + c := client.Card. + Create(). + SetNumber("string"). + SaveX(ctx) + log.Println("card created:", c) + + // query edges. + + // Output: +} +func ExampleComment() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the comment's edges. + + // create comment vertex with its edges. + c := client.Comment. + Create(). + SaveX(ctx) + log.Println("comment created:", c) + + // query edges. + + // Output: +} +func ExampleFile() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the file's edges. + + // create file vertex with its edges. + f := client.File. + Create(). + SetSize(1). + SetName("string"). + SaveX(ctx) + log.Println("file created:", f) + + // query edges. 
+ + // Output: +} +func ExampleGroup() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the group's edges. + f0 := client.File. + Create(). + SetSize(1). + SetName("string"). + SaveX(ctx) + log.Println("file created:", f0) + u1 := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SaveX(ctx) + log.Println("user created:", u1) + gi3 := client.GroupInfo. + Create(). + SetDesc("string"). + SetMaxUsers(1). + SaveX(ctx) + log.Println("groupinfo created:", gi3) + + // create group vertex with its edges. + gr := client.Group. + Create(). + SetActive(true). + SetExpire(time.Now()). + SetType("string"). + SetMaxUsers(1). + SetName("string"). + AddFiles(f0). + AddBlocked(u1). + SetInfo(gi3). + SaveX(ctx) + log.Println("group created:", gr) + + // query edges. + f0, err = gr.QueryFiles().First(ctx) + if err != nil { + log.Fatalf("failed querying files: %v", err) + } + log.Println("files found:", f0) + + u1, err = gr.QueryBlocked().First(ctx) + if err != nil { + log.Fatalf("failed querying blocked: %v", err) + } + log.Println("blocked found:", u1) + + gi3, err = gr.QueryInfo().First(ctx) + if err != nil { + log.Fatalf("failed querying info: %v", err) + } + log.Println("info found:", gi3) + + // Output: +} +func ExampleGroupInfo() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the groupinfo's edges. + + // create groupinfo vertex with its edges. + gi := client.GroupInfo. + Create(). + SetDesc("string"). 
+ SetMaxUsers(1). + SaveX(ctx) + log.Println("groupinfo created:", gi) + + // query edges. + + // Output: +} +func ExampleNode() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the node's edges. + n1 := client.Node. + Create(). + SetValue(1). + SaveX(ctx) + log.Println("node created:", n1) + + // create node vertex with its edges. + n := client.Node. + Create(). + SetValue(1). + SetNext(n1). + SaveX(ctx) + log.Println("node created:", n) + + // query edges. + + n1, err = n.QueryNext().First(ctx) + if err != nil { + log.Fatalf("failed querying next: %v", err) + } + log.Println("next found:", n1) + + // Output: +} +func ExamplePet() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the pet's edges. + + // create pet vertex with its edges. + pe := client.Pet. + Create(). + SetName("string"). + SaveX(ctx) + log.Println("pet created:", pe) + + // query edges. + + // Output: +} +func ExampleUser() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the user's edges. + c0 := client.Card. + Create(). + SetNumber("string"). + SaveX(ctx) + log.Println("card created:", c0) + pe1 := client.Pet. + Create(). + SetName("string"). + SaveX(ctx) + log.Println("pet created:", pe1) + f2 := client.File. + Create(). + SetSize(1). 
+ SetName("string"). + SaveX(ctx) + log.Println("file created:", f2) + gr3 := client.Group. + Create(). + SetActive(true). + SetExpire(time.Now()). + SetType("string"). + SetMaxUsers(1). + SetName("string"). + SaveX(ctx) + log.Println("group created:", gr3) + u4 := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SaveX(ctx) + log.Println("user created:", u4) + u6 := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SaveX(ctx) + log.Println("user created:", u6) + pe7 := client.Pet. + Create(). + SetName("string"). + SaveX(ctx) + log.Println("pet created:", pe7) + u8 := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SaveX(ctx) + log.Println("user created:", u8) + u10 := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SaveX(ctx) + log.Println("user created:", u10) + + // create user vertex with its edges. + u := client.User. + Create(). + SetAge(1). + SetName("string"). + SetLast("string"). + SetNickname("string"). + SetPhone("string"). + SetCard(c0). + AddPets(pe1). + AddFiles(f2). + AddGroups(gr3). + AddFriends(u4). + AddFollowing(u6). + SetTeam(pe7). + SetSpouse(u8). + SetParent(u10). + SaveX(ctx) + log.Println("user created:", u) + + // query edges. 
+ c0, err = u.QueryCard().First(ctx) + if err != nil { + log.Fatalf("failed querying card: %v", err) + } + log.Println("card found:", c0) + + pe1, err = u.QueryPets().First(ctx) + if err != nil { + log.Fatalf("failed querying pets: %v", err) + } + log.Println("pets found:", pe1) + + f2, err = u.QueryFiles().First(ctx) + if err != nil { + log.Fatalf("failed querying files: %v", err) + } + log.Println("files found:", f2) + + gr3, err = u.QueryGroups().First(ctx) + if err != nil { + log.Fatalf("failed querying groups: %v", err) + } + log.Println("groups found:", gr3) + + u4, err = u.QueryFriends().First(ctx) + if err != nil { + log.Fatalf("failed querying friends: %v", err) + } + log.Println("friends found:", u4) + + u6, err = u.QueryFollowing().First(ctx) + if err != nil { + log.Fatalf("failed querying following: %v", err) + } + log.Println("following found:", u6) + + pe7, err = u.QueryTeam().First(ctx) + if err != nil { + log.Fatalf("failed querying team: %v", err) + } + log.Println("team found:", pe7) + + u8, err = u.QuerySpouse().First(ctx) + if err != nil { + log.Fatalf("failed querying spouse: %v", err) + } + log.Println("spouse found:", u8) + + u10, err = u.QueryParent().First(ctx) + if err != nil { + log.Fatalf("failed querying parent: %v", err) + } + log.Println("parent found:", u10) + + // Output: +} diff --git a/entc/integration/ent/file.go b/entc/integration/ent/file.go new file mode 100644 index 000000000..1d294d45d --- /dev/null +++ b/entc/integration/ent/file.go @@ -0,0 +1,146 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// File is the model entity for the File schema. +type File struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Size holds the value of the "size" field. + Size int `json:"size,omitempty"` + // Name holds the value of the "name" field. 
+ Name string `json:"name,omitempty"` +} + +// FromResponse scans the gremlin response data into File. +func (f *File) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vf struct { + ID string `json:"id,omitempty"` + Size int `json:"size,omitempty"` + Name string `json:"name,omitempty"` + } + if err := vmap.Decode(&vf); err != nil { + return err + } + f.ID = vf.ID + f.Size = vf.Size + f.Name = vf.Name + return nil +} + +// FromRows scans the sql response data into File. +func (f *File) FromRows(rows *sql.Rows) error { + var vf struct { + ID int + Size int + Name string + } + // the order here should be the same as in the `file.Columns`. + if err := rows.Scan( + &vf.ID, + &vf.Size, + &vf.Name, + ); err != nil { + return err + } + f.ID = strconv.Itoa(vf.ID) + f.Size = vf.Size + f.Name = vf.Name + return nil +} + +// Update returns a builder for updating this File. +// Note that, you need to call File.Unwrap() before calling this method, if this File +// was returned from a transaction, and the transaction was committed or rolled back. +func (f *File) Update() *FileUpdateOne { + return (&FileClient{f.config}).UpdateOne(f) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (f *File) Unwrap() *File { + tx, ok := f.config.driver.(*txDriver) + if !ok { + panic("ent: File is not a transactional entity") + } + f.config.driver = tx.drv + return f +} + +// String implements the fmt.Stringer. +func (f *File) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("File(") + buf.WriteString(fmt.Sprintf("id=%v,", f.ID)) + buf.WriteString(fmt.Sprintf("size=%v", f.Size)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("name=%v", f.Name)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. 
+func (f *File) id() int { + id, _ := strconv.Atoi(f.ID) + return id +} + +// Files is a parsable slice of File. +type Files []*File + +// FromResponse scans the gremlin response data into Files. +func (f *Files) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vf []struct { + ID string `json:"id,omitempty"` + Size int `json:"size,omitempty"` + Name string `json:"name,omitempty"` + } + if err := vmap.Decode(&vf); err != nil { + return err + } + for _, v := range vf { + *f = append(*f, &File{ + ID: v.ID, + Size: v.Size, + Name: v.Name, + }) + } + return nil +} + +// FromRows scans the sql response data into Files. +func (f *Files) FromRows(rows *sql.Rows) error { + for rows.Next() { + vf := &File{} + if err := vf.FromRows(rows); err != nil { + return err + } + *f = append(*f, vf) + } + return nil +} + +func (f Files) config(cfg config) { + for i := range f { + f[i].config = cfg + } +} diff --git a/entc/integration/ent/file/file.go b/entc/integration/ent/file/file.go new file mode 100644 index 000000000..b8023cbca --- /dev/null +++ b/entc/integration/ent/file/file.go @@ -0,0 +1,33 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package file + +import ( + "fbc/ent/entc/integration/ent/schema" +) + +const ( + // Label holds the string label denoting the file type in the database. + Label = "file" + // FieldSize holds the string denoting the size vertex property in the database. + FieldSize = "size" + // FieldName holds the string denoting the name vertex property in the database. + FieldName = "name" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the file in the database. + Table = "files" +) + +// Columns holds all SQL columns are file fields. +var Columns = []string{ + FieldID, + FieldSize, + FieldName, +} + +var ( + fields = schema.File{}.Fields() + // SizeValidator is a validator for the "size" field. 
It is called by the builders before save. + SizeValidator = fields[0].Validators()[0].(func(int) error) +) diff --git a/entc/integration/ent/file/where.go b/entc/integration/ent/file/where.go new file mode 100644 index 000000000..fdb466b62 --- /dev/null +++ b/entc/integration/ent/file/where.go @@ -0,0 +1,448 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package file + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Size applies equality check predicate on the "size" field. It's identical to SizeEQ. 
+func Size(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.EQ(v)) + }, + } +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// SizeEQ applies the EQ predicate on the "size" field. +func SizeEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.EQ(v)) + }, + } +} + +// SizeNEQ applies the NEQ predicate on the "size" field. +func SizeNEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.NEQ(v)) + }, + } +} + +// SizeGT applies the GT predicate on the "size" field. +func SizeGT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.GT(v)) + }, + } +} + +// SizeGTE applies the GTE predicate on the "size" field. +func SizeGTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.GTE(v)) + }, + } +} + +// SizeLT applies the LT predicate on the "size" field. +func SizeLT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.LT(v)) + }, + } +} + +// SizeLTE applies the LTE predicate on the "size" field. 
+func SizeLTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSize), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.LTE(v)) + }, + } +} + +// SizeIn applies the In predicate on the "size" field. +func SizeIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSize), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.Within(v...)) + }, + } +} + +// SizeNotIn applies the NotIn predicate on the "size" field. +func SizeNotIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSize), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldSize, p.Without(v...)) + }, + } +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.NEQ(v)) + }, + } +} + +// NameGT applies the GT predicate on the "name" field. 
+func NameGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GT(v)) + }, + } +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GTE(v)) + }, + } +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LT(v)) + }, + } +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LTE(v)) + }, + } +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Within(v...)) + }, + } +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". 
This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Without(v...)) + }, + } +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Containing(v)) + }, + } +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.StartingWith(v)) + }, + } +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EndingWith(v)) + }, + } +} diff --git a/entc/integration/ent/file_create.go b/entc/integration/ent/file_create.go new file mode 100644 index 000000000..34059da30 --- /dev/null +++ b/entc/integration/ent/file_create.go @@ -0,0 +1,128 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/file" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// FileCreate is the builder for creating a File entity. +type FileCreate struct { + config + size *int + name *string +} + +// SetSize sets the size field. +func (fc *FileCreate) SetSize(i int) *FileCreate { + fc.size = &i + return fc +} + +// SetName sets the name field. 
+func (fc *FileCreate) SetName(s string) *FileCreate { + fc.name = &s + return fc +} + +// Save creates the File in the database. +func (fc *FileCreate) Save(ctx context.Context) (*File, error) { + if fc.size == nil { + return nil, errors.New("ent: missing required field \"size\"") + } + if err := file.SizeValidator(*fc.size); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"size\": %v", err) + } + if fc.name == nil { + return nil, errors.New("ent: missing required field \"name\"") + } + switch fc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fc.sqlSave(ctx) + case dialect.Neptune: + return fc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. +func (fc *FileCreate) SaveX(ctx context.Context) *File { + v, err := fc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (fc *FileCreate) sqlSave(ctx context.Context) (*File, error) { + var ( + res sql.Result + f = &File{config: fc.config} + ) + tx, err := fc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(file.Table).Default(fc.driver.Dialect()) + if fc.size != nil { + builder.Set(file.FieldSize, *fc.size) + f.Size = *fc.size + } + if fc.name != nil { + builder.Set(file.FieldName, *fc.name) + f.Name = *fc.name + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + f.ID = strconv.FormatInt(id, 10) + if err := tx.Commit(); err != nil { + return nil, err + } + return f, nil +} + +func (fc *FileCreate) gremlinSave(ctx context.Context) (*File, error) { + res := &gremlin.Response{} + query, bindings := fc.gremlin().Query() + if err := fc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + f := 
&File{config: fc.config} + if err := f.FromResponse(res); err != nil { + return nil, err + } + return f, nil +} + +func (fc *FileCreate) gremlin() *dsl.Traversal { + v := g.AddV(file.Label) + if fc.size != nil { + v.Property(dsl.Single, file.FieldSize, *fc.size) + } + if fc.name != nil { + v.Property(dsl.Single, file.FieldName, *fc.name) + } + return v.ValueMap(true) +} diff --git a/entc/integration/ent/file_delete.go b/entc/integration/ent/file_delete.go new file mode 100644 index 000000000..337bbcefc --- /dev/null +++ b/entc/integration/ent/file_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/file" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// FileDelete is the builder for deleting a File entity. +type FileDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (fd *FileDelete) Where(ps ...ent.Predicate) *FileDelete { + fd.predicates = append(fd.predicates, ps...) + return fd +} + +// Exec executes the deletion query. +func (fd *FileDelete) Exec(ctx context.Context) error { + switch fd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fd.sqlExec(ctx) + case dialect.Neptune: + return fd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (fd *FileDelete) ExecX(ctx context.Context) { + if err := fd.Exec(ctx); err != nil { + panic(err) + } +} + +func (fd *FileDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(file.Table)) + for _, p := range fd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(file.Table).FromSelect(selector).Query() + return fd.driver.Exec(ctx, query, args, &res) +} + +func (fd *FileDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := fd.gremlin().Query() + return fd.driver.Exec(ctx, query, bindings, res) +} + +func (fd *FileDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(file.Label) + for _, p := range fd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// FileDeleteOne is the builder for deleting a single File entity. +type FileDeleteOne struct { + fd *FileDelete +} + +// Exec executes the deletion query. +func (fdo *FileDeleteOne) Exec(ctx context.Context) error { + return fdo.fd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (fdo *FileDeleteOne) ExecX(ctx context.Context) { + fdo.fd.ExecX(ctx) +} diff --git a/entc/integration/ent/file_query.go b/entc/integration/ent/file_query.go new file mode 100644 index 000000000..65d33c4ed --- /dev/null +++ b/entc/integration/ent/file_query.go @@ -0,0 +1,598 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/file" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// FileQuery is the builder for querying File entities. +type FileQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. 
+ sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (fq *FileQuery) Where(ps ...ent.Predicate) *FileQuery { + fq.predicates = append(fq.predicates, ps...) + return fq +} + +// Limit adds a limit step to the query. +func (fq *FileQuery) Limit(limit int) *FileQuery { + fq.limit = &limit + return fq +} + +// Order adds an order step to the query. +func (fq *FileQuery) Order(o ...Order) *FileQuery { + fq.order = append(fq.order, o...) + return fq +} + +// Get returns a File entity by its id. +func (fq *FileQuery) Get(ctx context.Context, id string) (*File, error) { + return fq.Where(file.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (fq *FileQuery) GetX(ctx context.Context, id string) *File { + f, err := fq.Get(ctx, id) + if err != nil { + panic(err) + } + return f +} + +// First returns the first File entity in the query. Returns *ErrNotFound when no file was found. +func (fq *FileQuery) First(ctx context.Context) (*File, error) { + fs, err := fq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(fs) == 0 { + return nil, &ErrNotFound{file.Label} + } + return fs[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (fq *FileQuery) FirstX(ctx context.Context) *File { + f, err := fq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return f +} + +// FirstID returns the first File id in the query. Returns *ErrNotFound when no id was found. +func (fq *FileQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = fq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{file.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. 
+func (fq *FileQuery) FirstXID(ctx context.Context) string { + id, err := fq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only File entity in the query, returns an error if not exactly one entity was returned. +func (fq *FileQuery) Only(ctx context.Context) (*File, error) { + fs, err := fq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(fs) { + case 1: + return fs[0], nil + case 0: + return nil, &ErrNotFound{file.Label} + default: + return nil, &ErrNotSingular{file.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (fq *FileQuery) OnlyX(ctx context.Context) *File { + f, err := fq.Only(ctx) + if err != nil { + panic(err) + } + return f +} + +// OnlyID returns the only File id in the query, returns an error if not exactly one id was returned. +func (fq *FileQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = fq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{file.Label} + default: + err = &ErrNotSingular{file.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (fq *FileQuery) OnlyXID(ctx context.Context) string { + id, err := fq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Files. +func (fq *FileQuery) All(ctx context.Context) ([]*File, error) { + switch fq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fq.sqlAll(ctx) + case dialect.Neptune: + return fq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (fq *FileQuery) AllX(ctx context.Context) []*File { + fs, err := fq.All(ctx) + if err != nil { + panic(err) + } + return fs +} + +// IDs executes the query and returns a list of File ids. 
+func (fq *FileQuery) IDs(ctx context.Context) ([]string, error) { + switch fq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fq.sqlIDs(ctx) + case dialect.Neptune: + return fq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (fq *FileQuery) IDsX(ctx context.Context) []string { + ids, err := fq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (fq *FileQuery) Count(ctx context.Context) (int, error) { + switch fq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fq.sqlCount(ctx) + case dialect.Neptune: + return fq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (fq *FileQuery) CountX(ctx context.Context) int { + count, err := fq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (fq *FileQuery) Exist(ctx context.Context) (bool, error) { + switch fq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fq.sqlExist(ctx) + case dialect.Neptune: + return fq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (fq *FileQuery) ExistX(ctx context.Context) bool { + exist, err := fq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Size int `json:"size,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.File.Query(). +// GroupBy(file.FieldSize). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +// +func (fq *FileQuery) GroupBy(field string, fields ...string) *FileGroupBy { + group := &FileGroupBy{config: fq.config} + group.fields = append([]string{field}, fields...) + switch fq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = fq.sqlQuery() + case dialect.Neptune: + group.gremlin = fq.gremlinQuery() + } + return group +} + +func (fq *FileQuery) sqlAll(ctx context.Context) ([]*File, error) { + rows := &sql.Rows{} + selector := fq.sqlQuery() + if unique := fq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := fq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var fs Files + if err := fs.FromRows(rows); err != nil { + return nil, err + } + fs.config(fq.config) + return fs, nil +} + +func (fq *FileQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := fq.sqlQuery() + unique := []string{file.FieldID} + if len(fq.unique) > 0 { + unique = fq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := fq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (fq *FileQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := fq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (fq *FileQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := fq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (fq *FileQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(file.Table) + selector := 
sql.Select(t1.Columns(file.Columns...)...).From(t1) + if fq.sql != nil { + selector = fq.sql + selector.Select(selector.Columns(file.Columns...)...) + } + for _, p := range fq.predicates { + p.SQL(selector) + } + for _, p := range fq.order { + p.SQL(selector) + } + if limit := fq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (fq *FileQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := fq.gremlinQuery().Query() + if err := fq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (fq *FileQuery) gremlinAll(ctx context.Context) ([]*File, error) { + res := &gremlin.Response{} + query, bindings := fq.gremlinQuery().ValueMap(true).Query() + if err := fq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var fs Files + if err := fs.FromResponse(res); err != nil { + return nil, err + } + fs.config(fq.config) + return fs, nil +} + +func (fq *FileQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := fq.gremlinQuery().Count().Query() + if err := fq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (fq *FileQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := fq.gremlinQuery().HasNext().Query() + if err := fq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (fq *FileQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(file.Label) + if fq.gremlin != nil { + v = fq.gremlin.Clone() + } + for _, p := range fq.predicates { + p.Gremlin(v) + } + if len(fq.order) > 0 { + v.Order() + for _, p := 
range fq.order { + p.Gremlin(v) + } + } + if limit := fq.limit; limit != nil { + v.Limit(*limit) + } + if unique := fq.unique; len(unique) == 0 { + v.Dedup() + } + return v +} + +// FileQuery is the builder for group-by File entities. +type FileGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (fgb *FileGroupBy) Aggregate(fns ...Aggregate) *FileGroupBy { + fgb.fns = append(fgb.fns, fns...) + return fgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (fgb *FileGroupBy) Scan(ctx context.Context, v interface{}) error { + switch fgb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fgb.sqlScan(ctx, v) + case dialect.Neptune: + return fgb.gremlinScan(ctx, v) + default: + return errors.New("fgb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (fgb *FileGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := fgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (fgb *FileGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(fgb.fields) > 1 { + return nil, errors.New("ent: FileGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := fgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (fgb *FileGroupBy) StringsX(ctx context.Context) []string { + v, err := fgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (fgb *FileGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(fgb.fields) > 1 { + return nil, errors.New("ent: FileGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := fgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (fgb *FileGroupBy) IntsX(ctx context.Context) []int { + v, err := fgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (fgb *FileGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(fgb.fields) > 1 { + return nil, errors.New("ent: FileGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := fgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (fgb *FileGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := fgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (fgb *FileGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(fgb.fields) > 1 { + return nil, errors.New("ent: FileGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := fgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (fgb *FileGroupBy) BoolsX(ctx context.Context) []bool { + v, err := fgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (fgb *FileGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := fgb.sqlQuery().Query() + if err := fgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (fgb *FileGroupBy) sqlQuery() *sql.Selector { + selector := fgb.sql + columns := make([]string, 0, len(fgb.fields)+len(fgb.fns)) + columns = append(columns, fgb.fields...) + for _, fn := range fgb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(fgb.fields...) +} + +func (fgb *FileGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := fgb.gremlinQuery().Query() + if err := fgb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(fgb.fields)+len(fgb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (fgb *FileGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range fgb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range fgb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return fgb.gremlin.Group(). + By(__.Values(fgb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/file_update.go b/entc/integration/ent/file_update.go new file mode 100644 index 000000000..2e5cf1296 --- /dev/null +++ b/entc/integration/ent/file_update.go @@ -0,0 +1,321 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/file" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// FileUpdate is the builder for updating File entities. +type FileUpdate struct { + config + size *int + name *string + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (fu *FileUpdate) Where(ps ...ent.Predicate) *FileUpdate { + fu.predicates = append(fu.predicates, ps...) + return fu +} + +// SetSize sets the size field. +func (fu *FileUpdate) SetSize(i int) *FileUpdate { + fu.size = &i + return fu +} + +// SetName sets the name field. +func (fu *FileUpdate) SetName(s string) *FileUpdate { + fu.name = &s + return fu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (fu *FileUpdate) Save(ctx context.Context) (int, error) { + if fu.size != nil { + if err := file.SizeValidator(*fu.size); err != nil { + return 0, fmt.Errorf("ent: validator failed for field \"size\": %v", err) + } + } + switch fu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := fu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (fu *FileUpdate) SaveX(ctx context.Context) int { + affected, err := fu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (fu *FileUpdate) Exec(ctx context.Context) error { + _, err := fu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (fu *FileUpdate) ExecX(ctx context.Context) { + if err := fu.Exec(ctx); err != nil { + panic(err) + } +} + +func (fu *FileUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(file.FieldID).From(sql.Table(file.Table)) + for _, p := range fu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = fu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := fu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(file.Table).Where(sql.InInts(file.FieldID, ids...)) + ) + if fu.size != nil { + update = true + builder.Set(file.FieldSize, *fu.size) + } + if fu.name != nil { + update = true + builder.Set(file.FieldName, *fu.name) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (fu *FileUpdate) gremlinSave(ctx context.Context) ([]*File, error) { + res := &gremlin.Response{} + query, bindings := fu.gremlin().Query() + if err := fu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var fs Files + fs.config(fu.config) + if err := fs.FromResponse(res); err != nil { + return nil, err + } + return fs, nil +} + +func (fu *FileUpdate) gremlin() *dsl.Traversal { + v := g.V().HasLabel(file.Label) + for _, p := range fu.predicates { + p.Gremlin(v) + } + var ( + trs []*dsl.Traversal + ) + if fu.size != nil { + v.Property(dsl.Single, file.FieldSize, *fu.size) + } + if fu.name != nil { + 
v.Property(dsl.Single, file.FieldName, *fu.name) + } + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} + +// FileUpdateOne is the builder for updating a single File entity. +type FileUpdateOne struct { + config + id string + size *int + name *string +} + +// SetSize sets the size field. +func (fuo *FileUpdateOne) SetSize(i int) *FileUpdateOne { + fuo.size = &i + return fuo +} + +// SetName sets the name field. +func (fuo *FileUpdateOne) SetName(s string) *FileUpdateOne { + fuo.name = &s + return fuo +} + +// Save executes the query and returns the updated entity. +func (fuo *FileUpdateOne) Save(ctx context.Context) (*File, error) { + if fuo.size != nil { + if err := file.SizeValidator(*fuo.size); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"size\": %v", err) + } + } + switch fuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return fuo.sqlSave(ctx) + case dialect.Neptune: + return fuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (fuo *FileUpdateOne) SaveX(ctx context.Context) *File { + f, err := fuo.Save(ctx) + if err != nil { + panic(err) + } + return f +} + +// Exec executes the query on the entity. +func (fuo *FileUpdateOne) Exec(ctx context.Context) error { + _, err := fuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (fuo *FileUpdateOne) ExecX(ctx context.Context) { + if err := fuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (fuo *FileUpdateOne) sqlSave(ctx context.Context) (f *File, err error) { + selector := sql.Select(file.Columns...).From(sql.Table(file.Table)) + file.ID(fuo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = fuo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + f = &File{config: fuo.config} + if err := f.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into File: %v", err) + } + id = f.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: File not found with id: %v", fuo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one File with the same id: %v", fuo.id) + } + + tx, err := fuo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = sql.Update(file.Table).Where(sql.InInts(file.FieldID, ids...)) + ) + if fuo.size != nil { + update = true + builder.Set(file.FieldSize, *fuo.size) + f.Size = *fuo.size + } + if fuo.name != nil { + update = true + builder.Set(file.FieldName, *fuo.name) + f.Name = *fuo.name + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return f, nil +} + +func (fuo *FileUpdateOne) gremlinSave(ctx context.Context) (*File, error) { + res := &gremlin.Response{} + query, bindings := fuo.gremlin(fuo.id).Query() + if err := fuo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + f := &File{config: fuo.config} + if err := f.FromResponse(res); err != nil { + return nil, err + } + return f, nil +} + +func (fuo 
*FileUpdateOne) gremlin(id string) *dsl.Traversal { + v := g.V(id) + var ( + trs []*dsl.Traversal + ) + if fuo.size != nil { + v.Property(dsl.Single, file.FieldSize, *fuo.size) + } + if fuo.name != nil { + v.Property(dsl.Single, file.FieldName, *fuo.name) + } + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/group.go b/entc/integration/ent/group.go new file mode 100644 index 000000000..0c509b7cb --- /dev/null +++ b/entc/integration/ent/group.go @@ -0,0 +1,204 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + "time" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Group is the model entity for the Group schema. +type Group struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Active holds the value of the "active" field. + Active bool `json:"active,omitempty"` + // Expire holds the value of the "expire" field. + Expire time.Time `json:"expire,omitempty"` + // Type holds the value of the "type" field. + Type *string `json:"type,omitempty"` + // MaxUsers holds the value of the "max_users" field. + MaxUsers int `json:"max_users,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` +} + +// FromResponse scans the gremlin response data into Group. 
func (gr *Group) FromResponse(res *gremlin.Response) error {
	vmap, err := res.ReadValueMap()
	if err != nil {
		return err
	}
	// Anonymous struct mirrors Group, except "expire" arrives from gremlin as
	// a Unix timestamp in seconds (int64) and is converted below via time.Unix.
	var vgr struct {
		ID       string  `json:"id,omitempty"`
		Active   bool    `json:"active,omitempty"`
		Expire   int64   `json:"expire,omitempty"`
		Type     *string `json:"type,omitempty"`
		MaxUsers int     `json:"max_users,omitempty"`
		Name     string  `json:"name,omitempty"`
	}
	if err := vmap.Decode(&vgr); err != nil {
		return err
	}
	gr.ID = vgr.ID
	gr.Active = vgr.Active
	gr.Expire = time.Unix(vgr.Expire, 0)
	gr.Type = vgr.Type
	gr.MaxUsers = vgr.MaxUsers
	gr.Name = vgr.Name
	return nil
}

// FromRows scans the sql response data into Group.
func (gr *Group) FromRows(rows *sql.Rows) error {
	// Nullable columns ("type", "max_users") are scanned through sql.Null*
	// wrappers; the SQL id is an int and is stringified into gr.ID below.
	var vgr struct {
		ID       int
		Active   bool
		Expire   time.Time
		Type     sql.NullString
		MaxUsers sql.NullInt64
		Name     string
	}
	// the order here should be the same as in the `group.Columns`.
	if err := rows.Scan(
		&vgr.ID,
		&vgr.Active,
		&vgr.Expire,
		&vgr.Type,
		&vgr.MaxUsers,
		&vgr.Name,
	); err != nil {
		return err
	}
	gr.ID = strconv.Itoa(vgr.ID)
	gr.Active = vgr.Active
	gr.Expire = vgr.Expire
	if vgr.Type.Valid {
		gr.Type = &vgr.Type.String
	}
	// NOTE(review): an invalid (NULL) max_users yields Int64 == 0, so this
	// silently maps NULL to 0 — presumably intentional for generated code.
	gr.MaxUsers = int(vgr.MaxUsers.Int64)
	gr.Name = vgr.Name
	return nil
}

// QueryFiles queries the files edge of the Group.
func (gr *Group) QueryFiles() *FileQuery {
	return (&GroupClient{gr.config}).QueryFiles(gr)
}

// QueryBlocked queries the blocked edge of the Group.
func (gr *Group) QueryBlocked() *UserQuery {
	return (&GroupClient{gr.config}).QueryBlocked(gr)
}

// QueryUsers queries the users edge of the Group.
func (gr *Group) QueryUsers() *UserQuery {
	return (&GroupClient{gr.config}).QueryUsers(gr)
}

// QueryInfo queries the info edge of the Group.
func (gr *Group) QueryInfo() *GroupInfoQuery {
	return (&GroupClient{gr.config}).QueryInfo(gr)
}

// Update returns a builder for updating this Group.
+// Note that, you need to call Group.Unwrap() before calling this method, if this Group +// was returned from a transaction, and the transaction was committed or rolled back. +func (gr *Group) Update() *GroupUpdateOne { + return (&GroupClient{gr.config}).UpdateOne(gr) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (gr *Group) Unwrap() *Group { + tx, ok := gr.config.driver.(*txDriver) + if !ok { + panic("ent: Group is not a transactional entity") + } + gr.config.driver = tx.drv + return gr +} + +// String implements the fmt.Stringer. +func (gr *Group) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Group(") + buf.WriteString(fmt.Sprintf("id=%v,", gr.ID)) + buf.WriteString(fmt.Sprintf("active=%v", gr.Active)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("expire=%v", gr.Expire)) + buf.WriteString(", ") + if v := gr.Type; v != nil { + buf.WriteString(fmt.Sprintf("type=%v", *v)) + } + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("max_users=%v", gr.MaxUsers)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("name=%v", gr.Name)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (gr *Group) id() int { + id, _ := strconv.Atoi(gr.ID) + return id +} + +// Groups is a parsable slice of Group. +type Groups []*Group + +// FromResponse scans the gremlin response data into Groups. 
+func (gr *Groups) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vgr []struct { + ID string `json:"id,omitempty"` + Active bool `json:"active,omitempty"` + Expire int64 `json:"expire,omitempty"` + Type *string `json:"type,omitempty"` + MaxUsers int `json:"max_users,omitempty"` + Name string `json:"name,omitempty"` + } + if err := vmap.Decode(&vgr); err != nil { + return err + } + for _, v := range vgr { + *gr = append(*gr, &Group{ + ID: v.ID, + Active: v.Active, + Expire: time.Unix(v.Expire, 0), + Type: v.Type, + MaxUsers: v.MaxUsers, + Name: v.Name, + }) + } + return nil +} + +// FromRows scans the sql response data into Groups. +func (gr *Groups) FromRows(rows *sql.Rows) error { + for rows.Next() { + vgr := &Group{} + if err := vgr.FromRows(rows); err != nil { + return err + } + *gr = append(*gr, vgr) + } + return nil +} + +func (gr Groups) config(cfg config) { + for i := range gr { + gr[i].config = cfg + } +} diff --git a/entc/integration/ent/group/group.go b/entc/integration/ent/group/group.go new file mode 100644 index 000000000..48e105134 --- /dev/null +++ b/entc/integration/ent/group/group.go @@ -0,0 +1,104 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package group + +import ( + "fbc/ent/entc/integration/ent/schema" +) + +const ( + // Label holds the string label denoting the group type in the database. + Label = "group" + // FilesLabel holds the string label denoting the files edge type in the database. + FilesLabel = "group_files" + // BlockedLabel holds the string label denoting the blocked edge type in the database. + BlockedLabel = "group_blocked" + // UsersInverseLabel holds the string label denoting the users inverse edge type in the database. + UsersInverseLabel = "user_groups" + // InfoLabel holds the string label denoting the info edge type in the database. 
+ InfoLabel = "group_info" + // FieldActive holds the string denoting the active vertex property in the database. + FieldActive = "active" + // DefaultActive holds the default value for the active field. + DefaultActive = true + // FieldExpire holds the string denoting the expire vertex property in the database. + FieldExpire = "expire" + // FieldType holds the string denoting the type vertex property in the database. + FieldType = "type" + // FieldMaxUsers holds the string denoting the max_users vertex property in the database. + FieldMaxUsers = "max_users" + // DefaultMaxUsers holds the default value for the max_users field. + DefaultMaxUsers int = 10 + // FieldName holds the string denoting the name vertex property in the database. + FieldName = "name" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the group in the database. + Table = "groups" + // FilesTable is the table the holds the files relation/edge. + FilesTable = "files" + // FilesInverseTable is the table name for the File entity. + // It exists in this package in order to avoid circular dependency with the "file" package. + FilesInverseTable = "files" + // FilesColumn is the table column denoting the files relation/edge. + FilesColumn = "group_file_id" + // BlockedTable is the table the holds the blocked relation/edge. + BlockedTable = "users" + // BlockedInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + BlockedInverseTable = "users" + // BlockedColumn is the table column denoting the blocked relation/edge. + BlockedColumn = "group_blocked_id" + // UsersTable is the table the holds the users relation/edge. The primary key declared below. + UsersTable = "user_groups" + // UsersInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. 
+ UsersInverseTable = "users" + // InfoTable is the table the holds the info relation/edge. + InfoTable = "groups" + // InfoInverseTable is the table name for the GroupInfo entity. + // It exists in this package in order to avoid circular dependency with the "groupinfo" package. + InfoInverseTable = "group_infos" + // InfoColumn is the table column denoting the info relation/edge. + InfoColumn = "info_id" +) + +// Columns holds all SQL columns are group fields. +var Columns = []string{ + FieldID, + FieldActive, + FieldExpire, + FieldType, + FieldMaxUsers, + FieldName, +} + +var ( + // UsersPrimaryKey and UsersColumn2 are the table columns denoting the + // primary key for the users relation (M2M). + UsersPrimaryKey = []string{"user_id", "group_id"} +) + +var ( + fields = schema.Group{}.Fields() + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator = fields[2].Validators()[0].(func(string) error) + // MaxUsersValidator is a validator for the "max_users" field. It is called by the builders before save. + MaxUsersValidator = fields[3].Validators()[0].(func(int) error) + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator = func() func(string) error { + validators := fields[4].Validators() + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() +) diff --git a/entc/integration/ent/group/where.go b/entc/integration/ent/group/where.go new file mode 100644 index 000000000..6965ee1ee --- /dev/null +++ b/entc/integration/ent/group/where.go @@ -0,0 +1,940 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package group + +import ( + "strconv" + "time" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Active applies equality check predicate on the "active" field. It's identical to ActiveEQ. +func Active(v bool) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldActive), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldActive, p.EQ(v)) + }, + } +} + +// Expire applies equality check predicate on the "expire" field. It's identical to ExpireEQ. 
+func Expire(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.EQ(v)) + }, + } +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.EQ(v)) + }, + } +} + +// MaxUsers applies equality check predicate on the "max_users" field. It's identical to MaxUsersEQ. +func MaxUsers(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.EQ(v)) + }, + } +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// ActiveEQ applies the EQ predicate on the "active" field. +func ActiveEQ(v bool) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldActive), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldActive, p.EQ(v)) + }, + } +} + +// ActiveNEQ applies the NEQ predicate on the "active" field. +func ActiveNEQ(v bool) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldActive), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldActive, p.NEQ(v)) + }, + } +} + +// ExpireEQ applies the EQ predicate on the "expire" field. 
+func ExpireEQ(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.EQ(v)) + }, + } +} + +// ExpireNEQ applies the NEQ predicate on the "expire" field. +func ExpireNEQ(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.NEQ(v)) + }, + } +} + +// ExpireGT applies the GT predicate on the "expire" field. +func ExpireGT(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.GT(v)) + }, + } +} + +// ExpireGTE applies the GTE predicate on the "expire" field. +func ExpireGTE(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.GTE(v)) + }, + } +} + +// ExpireLT applies the LT predicate on the "expire" field. +func ExpireLT(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.LT(v)) + }, + } +} + +// ExpireLTE applies the LTE predicate on the "expire" field. +func ExpireLTE(v time.Time) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldExpire), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.LTE(v)) + }, + } +} + +// ExpireIn applies the In predicate on the "expire" field. 
+func ExpireIn(vs ...time.Time) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldExpire), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.Within(v...)) + }, + } +} + +// ExpireNotIn applies the NotIn predicate on the "expire" field. +func ExpireNotIn(vs ...time.Time) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldExpire), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldExpire, p.Without(v...)) + }, + } +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.EQ(v)) + }, + } +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.NEQ(v)) + }, + } +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.GT(v)) + }, + } +} + +// TypeGTE applies the GTE predicate on the "type" field. 
+func TypeGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.GTE(v)) + }, + } +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.LT(v)) + }, + } +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.LTE(v)) + }, + } +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldType), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.Within(v...)) + }, + } +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldType), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.Without(v...)) + }, + } +} + +// TypeContains applies the Contains predicate on the "type" field. 
+func TypeContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.Containing(v)) + }, + } +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.StartingWith(v)) + }, + } +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldType), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldType, p.EndingWith(v)) + }, + } +} + +// MaxUsersEQ applies the EQ predicate on the "max_users" field. +func MaxUsersEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.EQ(v)) + }, + } +} + +// MaxUsersNEQ applies the NEQ predicate on the "max_users" field. +func MaxUsersNEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.NEQ(v)) + }, + } +} + +// MaxUsersGT applies the GT predicate on the "max_users" field. +func MaxUsersGT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.GT(v)) + }, + } +} + +// MaxUsersGTE applies the GTE predicate on the "max_users" field. 
+func MaxUsersGTE(v int) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.GTE(s.C(FieldMaxUsers), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldMaxUsers, p.GTE(v))
+		},
+	}
+}
+
+// MaxUsersLT applies the LT predicate on the "max_users" field.
+func MaxUsersLT(v int) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.LT(s.C(FieldMaxUsers), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldMaxUsers, p.LT(v))
+		},
+	}
+}
+
+// MaxUsersLTE applies the LTE predicate on the "max_users" field.
+func MaxUsersLTE(v int) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.LTE(s.C(FieldMaxUsers), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldMaxUsers, p.LTE(v))
+		},
+	}
+}
+
+// MaxUsersIn applies the In predicate on the "max_users" field.
+func MaxUsersIn(vs ...int) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+			if len(vs) == 0 {
+				s.Where(sql.False())
+				return
+			}
+			s.Where(sql.In(s.C(FieldMaxUsers), v...))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldMaxUsers, p.Within(v...))
+		},
+	}
+}
+
+// MaxUsersNotIn applies the NotIn predicate on the "max_users" field.
+func MaxUsersNotIn(vs ...int) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+ if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldMaxUsers), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.Without(v...)) + }, + } +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.NEQ(v)) + }, + } +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GT(v)) + }, + } +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GTE(v)) + }, + } +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LT(v)) + }, + } +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LTE(v)) + }, + } +} + +// NameIn applies the In predicate on the "name" field. 
+func NameIn(vs ...string) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+			if len(vs) == 0 {
+				s.Where(sql.False())
+				return
+			}
+			s.Where(sql.In(s.C(FieldName), v...))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldName, p.Within(v...))
+		},
+	}
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+			if len(vs) == 0 {
+				s.Where(sql.False())
+				return
+			}
+			s.Where(sql.NotIn(s.C(FieldName), v...))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldName, p.Without(v...))
+		},
+	}
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.Contains(s.C(FieldName), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldName, p.Containing(v))
+		},
+	}
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.HasPrefix(s.C(FieldName), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldName, p.StartingWith(v))
+		},
+	}
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EndingWith(v)) + }, + } +} + +// HasFiles applies the HasEdge predicate on the "files" edge. +func HasFiles() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where( + sql.In( + t1.C(FieldID), + sql.Select(FilesColumn). + From(sql.Table(FilesTable)). + Where(sql.NotNull(FilesColumn)), + ), + ) + }, + Gremlin: func(t *dsl.Traversal) { + t.OutE(FilesLabel).OutV() + }, + } +} + +// HasFilesWith applies the HasEdge predicate on the "files" edge with a given conditions (other predicates). +func HasFilesWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FilesColumn).From(sql.Table(FilesTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(FieldID), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.InV() + for _, p := range preds { + p.Gremlin(tr) + } + t.OutE(FilesLabel).Where(tr).OutV() + }, + } +} + +// HasBlocked applies the HasEdge predicate on the "blocked" edge. +func HasBlocked() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where( + sql.In( + t1.C(FieldID), + sql.Select(BlockedColumn). + From(sql.Table(BlockedTable)). + Where(sql.NotNull(BlockedColumn)), + ), + ) + }, + Gremlin: func(t *dsl.Traversal) { + t.OutE(BlockedLabel).OutV() + }, + } +} + +// HasBlockedWith applies the HasEdge predicate on the "blocked" edge with a given conditions (other predicates). 
+func HasBlockedWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(BlockedColumn).From(sql.Table(BlockedTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(FieldID), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.InV() + for _, p := range preds { + p.Gremlin(tr) + } + t.OutE(BlockedLabel).Where(tr).OutV() + }, + } +} + +// HasUsers applies the HasEdge predicate on the "users" edge. +func HasUsers() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where( + sql.In( + t1.C(FieldID), + sql.Select(UsersPrimaryKey[1]).From(sql.Table(UsersTable)), + ), + ) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(UsersInverseLabel).InV() + }, + } +} + +// HasUsersWith applies the HasEdge predicate on the "users" edge with a given conditions (other predicates). +func HasUsersWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Table(UsersInverseTable) + t3 := sql.Table(UsersTable) + t4 := sql.Select(t3.C(UsersPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(UsersPrimaryKey[0]), t2.C(FieldID)) + t5 := sql.Select().From(t2) + for _, p := range preds { + p.SQL(t5) + } + t4.FromSelect(t5) + s.Where(sql.In(t1.C(FieldID), t4)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(UsersInverseLabel).Where(tr).InV() + }, + } +} + +// HasInfo applies the HasEdge predicate on the "info" edge. +func HasInfo() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where(sql.NotNull(t1.C(InfoColumn))) + }, + Gremlin: func(t *dsl.Traversal) { + t.OutE(InfoLabel).OutV() + }, + } +} + +// HasInfoWith applies the HasEdge predicate on the "info" edge with a given conditions (other predicates). 
+func HasInfoWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FieldID).From(sql.Table(InfoInverseTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(InfoColumn), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.InV() + for _, p := range preds { + p.Gremlin(tr) + } + t.OutE(InfoLabel).Where(tr).OutV() + }, + } +} diff --git a/entc/integration/ent/group_create.go b/entc/integration/ent/group_create.go new file mode 100644 index 000000000..3e9dbe65d --- /dev/null +++ b/entc/integration/ent/group_create.go @@ -0,0 +1,410 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "fbc/ent/entc/integration/ent/file" + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// GroupCreate is the builder for creating a Group entity. +type GroupCreate struct { + config + active *bool + expire *time.Time + _type *string + max_users *int + name *string + files map[string]struct{} + blocked map[string]struct{} + users map[string]struct{} + info map[string]struct{} +} + +// SetActive sets the active field. +func (gc *GroupCreate) SetActive(b bool) *GroupCreate { + gc.active = &b + return gc +} + +// SetNillableActive sets the active field if the given value is not nil. +func (gc *GroupCreate) SetNillableActive(b *bool) *GroupCreate { + if b != nil { + gc.SetActive(*b) + } + return gc +} + +// SetExpire sets the expire field. +func (gc *GroupCreate) SetExpire(t time.Time) *GroupCreate { + gc.expire = &t + return gc +} + +// SetType sets the type field. 
+func (gc *GroupCreate) SetType(s string) *GroupCreate { + gc._type = &s + return gc +} + +// SetNillableType sets the type field if the given value is not nil. +func (gc *GroupCreate) SetNillableType(s *string) *GroupCreate { + if s != nil { + gc.SetType(*s) + } + return gc +} + +// SetMaxUsers sets the max_users field. +func (gc *GroupCreate) SetMaxUsers(i int) *GroupCreate { + gc.max_users = &i + return gc +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. +func (gc *GroupCreate) SetNillableMaxUsers(i *int) *GroupCreate { + if i != nil { + gc.SetMaxUsers(*i) + } + return gc +} + +// SetName sets the name field. +func (gc *GroupCreate) SetName(s string) *GroupCreate { + gc.name = &s + return gc +} + +// AddFileIDs adds the files edge to File by ids. +func (gc *GroupCreate) AddFileIDs(ids ...string) *GroupCreate { + if gc.files == nil { + gc.files = make(map[string]struct{}) + } + for i := range ids { + gc.files[ids[i]] = struct{}{} + } + return gc +} + +// AddFiles adds the files edges to File. +func (gc *GroupCreate) AddFiles(f ...*File) *GroupCreate { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return gc.AddFileIDs(ids...) +} + +// AddBlockedIDs adds the blocked edge to User by ids. +func (gc *GroupCreate) AddBlockedIDs(ids ...string) *GroupCreate { + if gc.blocked == nil { + gc.blocked = make(map[string]struct{}) + } + for i := range ids { + gc.blocked[ids[i]] = struct{}{} + } + return gc +} + +// AddBlocked adds the blocked edges to User. +func (gc *GroupCreate) AddBlocked(u ...*User) *GroupCreate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gc.AddBlockedIDs(ids...) +} + +// AddUserIDs adds the users edge to User by ids. 
+func (gc *GroupCreate) AddUserIDs(ids ...string) *GroupCreate { + if gc.users == nil { + gc.users = make(map[string]struct{}) + } + for i := range ids { + gc.users[ids[i]] = struct{}{} + } + return gc +} + +// AddUsers adds the users edges to User. +func (gc *GroupCreate) AddUsers(u ...*User) *GroupCreate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gc.AddUserIDs(ids...) +} + +// SetInfoID sets the info edge to GroupInfo by id. +func (gc *GroupCreate) SetInfoID(id string) *GroupCreate { + if gc.info == nil { + gc.info = make(map[string]struct{}) + } + gc.info[id] = struct{}{} + return gc +} + +// SetInfo sets the info edge to GroupInfo. +func (gc *GroupCreate) SetInfo(g *GroupInfo) *GroupCreate { + return gc.SetInfoID(g.ID) +} + +// Save creates the Group in the database. +func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) { + if gc.active == nil { + v := group.DefaultActive + gc.active = &v + } + if gc.expire == nil { + return nil, errors.New("ent: missing required field \"expire\"") + } + if gc._type != nil { + if err := group.TypeValidator(*gc._type); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"type\": %v", err) + } + } + if gc.max_users == nil { + v := group.DefaultMaxUsers + gc.max_users = &v + } + if err := group.MaxUsersValidator(*gc.max_users); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"max_users\": %v", err) + } + if gc.name == nil { + return nil, errors.New("ent: missing required field \"name\"") + } + if err := group.NameValidator(*gc.name); err != nil { + return nil, fmt.Errorf("ent: validator failed for field \"name\": %v", err) + } + if len(gc.info) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"info\"") + } + if gc.info == nil { + return nil, errors.New("ent: missing required edge \"info\"") + } + switch gc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gc.sqlSave(ctx) + case 
dialect.Neptune: + return gc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. +func (gc *GroupCreate) SaveX(ctx context.Context) *Group { + v, err := gc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (gc *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { + var ( + res sql.Result + gr = &Group{config: gc.config} + ) + tx, err := gc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(group.Table).Default(gc.driver.Dialect()) + if gc.active != nil { + builder.Set(group.FieldActive, *gc.active) + gr.Active = *gc.active + } + if gc.expire != nil { + builder.Set(group.FieldExpire, *gc.expire) + gr.Expire = *gc.expire + } + if gc._type != nil { + builder.Set(group.FieldType, *gc._type) + *gr.Type = *gc._type + } + if gc.max_users != nil { + builder.Set(group.FieldMaxUsers, *gc.max_users) + gr.MaxUsers = *gc.max_users + } + if gc.name != nil { + builder.Set(group.FieldName, *gc.name) + gr.Name = *gc.name + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + gr.ID = strconv.FormatInt(id, 10) + if len(gc.files) > 0 { + p := sql.P() + for eid := range gc.files { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + p.Or().EQ(file.FieldID, eid) + } + query, args := sql.Update(group.FilesTable). + Set(group.FilesColumn, id). + Where(sql.And(p, sql.IsNull(group.FilesColumn))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(gc.files) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"Group\"", keys(gc.files))}) + } + } + if len(gc.blocked) > 0 { + p := sql.P() + for eid := range gc.blocked { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + p.Or().EQ(user.FieldID, eid) + } + query, args := sql.Update(group.BlockedTable). + Set(group.BlockedColumn, id). + Where(sql.And(p, sql.IsNull(group.BlockedColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(gc.blocked) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"blocked\" %v already connected to a different \"Group\"", keys(gc.blocked))}) + } + } + if len(gc.users) > 0 { + for eid := range gc.users { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + + query, args := sql.Insert(group.UsersTable). + Columns(group.UsersPrimaryKey[1], group.UsersPrimaryKey[0]). + Values(id, eid). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if len(gc.info) > 0 { + for eid := range gc.info { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + query, args := sql.Update(group.InfoTable). + Set(group.InfoColumn, eid). + Where(sql.EQ(group.FieldID, id)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if err := tx.Commit(); err != nil { + return nil, err + } + return gr, nil +} + +func (gc *GroupCreate) gremlinSave(ctx context.Context) (*Group, error) { + res := &gremlin.Response{} + query, bindings := gc.gremlin().Query() + if err := gc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + gr := &Group{config: gc.config} + if err := gr.FromResponse(res); err != nil { + return nil, err + } + return gr, nil +} + +func (gc *GroupCreate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. + } + constraints := make([]*constraint, 0, 2) + v := g.AddV(group.Label) + if gc.active != nil { + v.Property(dsl.Single, group.FieldActive, *gc.active) + } + if gc.expire != nil { + v.Property(dsl.Single, group.FieldExpire, *gc.expire) + } + if gc._type != nil { + v.Property(dsl.Single, group.FieldType, *gc._type) + } + if gc.max_users != nil { + v.Property(dsl.Single, group.FieldMaxUsers, *gc.max_users) + } + if gc.name != nil { + v.Property(dsl.Single, group.FieldName, *gc.name) + } + for id := range gc.files { + v.AddE(group.FilesLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.FilesLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.FilesLabel, id)), + }) + } + for id := range gc.blocked { + v.AddE(group.BlockedLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.BlockedLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.BlockedLabel, id)), + }) + } + for id := range gc.users { + v.AddE(user.GroupsLabel).From(g.V(id)).InV() + } + for id := range gc.info { + 
v.AddE(group.InfoLabel).To(g.V(id)).OutV() + } + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr +} diff --git a/entc/integration/ent/group_delete.go b/entc/integration/ent/group_delete.go new file mode 100644 index 000000000..75f34942b --- /dev/null +++ b/entc/integration/ent/group_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/group" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// GroupDelete is the builder for deleting a Group entity. +type GroupDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (gd *GroupDelete) Where(ps ...ent.Predicate) *GroupDelete { + gd.predicates = append(gd.predicates, ps...) + return gd +} + +// Exec executes the deletion query. +func (gd *GroupDelete) Exec(ctx context.Context) error { + switch gd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gd.sqlExec(ctx) + case dialect.Neptune: + return gd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (gd *GroupDelete) ExecX(ctx context.Context) { + if err := gd.Exec(ctx); err != nil { + panic(err) + } +} + +func (gd *GroupDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(group.Table)) + for _, p := range gd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(group.Table).FromSelect(selector).Query() + return gd.driver.Exec(ctx, query, args, &res) +} + +func (gd *GroupDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := gd.gremlin().Query() + return gd.driver.Exec(ctx, query, bindings, res) +} + +func (gd *GroupDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(group.Label) + for _, p := range gd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// GroupDeleteOne is the builder for deleting a single Group entity. +type GroupDeleteOne struct { + gd *GroupDelete +} + +// Exec executes the deletion query. +func (gdo *GroupDeleteOne) Exec(ctx context.Context) error { + return gdo.gd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (gdo *GroupDeleteOne) ExecX(ctx context.Context) { + gdo.gd.ExecX(ctx) +} diff --git a/entc/integration/ent/group_query.go b/entc/integration/ent/group_query.go new file mode 100644 index 000000000..05b3975a6 --- /dev/null +++ b/entc/integration/ent/group_query.go @@ -0,0 +1,682 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/file" + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// GroupQuery is the builder for querying Group entities. 
+type GroupQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (gq *GroupQuery) Where(ps ...ent.Predicate) *GroupQuery { + gq.predicates = append(gq.predicates, ps...) + return gq +} + +// Limit adds a limit step to the query. +func (gq *GroupQuery) Limit(limit int) *GroupQuery { + gq.limit = &limit + return gq +} + +// Order adds an order step to the query. +func (gq *GroupQuery) Order(o ...Order) *GroupQuery { + gq.order = append(gq.order, o...) + return gq +} + +// QueryFiles chains the current query on the files edge. +func (gq *GroupQuery) QueryFiles() *FileQuery { + query := &FileQuery{config: gq.config} + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(file.Table) + t2 := gq.sqlQuery() + t2.Select(t2.C(group.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(group.FilesColumn), t2.C(group.FieldID)) + case dialect.Neptune: + gremlin := gq.gremlinQuery() + query.gremlin = gremlin.OutE(group.FilesLabel).InV() + } + return query +} + +// QueryBlocked chains the current query on the blocked edge. +func (gq *GroupQuery) QueryBlocked() *UserQuery { + query := &UserQuery{config: gq.config} + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := gq.sqlQuery() + t2.Select(t2.C(group.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(group.BlockedColumn), t2.C(group.FieldID)) + case dialect.Neptune: + gremlin := gq.gremlinQuery() + query.gremlin = gremlin.OutE(group.BlockedLabel).InV() + } + return query +} + +// QueryUsers chains the current query on the users edge. 
+func (gq *GroupQuery) QueryUsers() *UserQuery { + query := &UserQuery{config: gq.config} + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := gq.sqlQuery() + t2.Select(t2.C(group.FieldID)) + t3 := sql.Table(group.UsersTable) + t4 := sql.Select(t3.C(group.UsersPrimaryKey[0])). + From(t3). + Join(t2). + On(t3.C(group.UsersPrimaryKey[1]), t2.C(group.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(group.UsersPrimaryKey[0])) + case dialect.Neptune: + gremlin := gq.gremlinQuery() + query.gremlin = gremlin.InE(user.GroupsLabel).OutV() + } + return query +} + +// QueryInfo chains the current query on the info edge. +func (gq *GroupQuery) QueryInfo() *GroupInfoQuery { + query := &GroupInfoQuery{config: gq.config} + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(groupinfo.Table) + t2 := gq.sqlQuery() + t2.Select(t2.C(group.InfoColumn)) + query.sql = sql.Select(t1.Columns(groupinfo.Columns...)...). + From(t1). + Join(t2). + On(t1.C(groupinfo.FieldID), t2.C(group.InfoColumn)) + case dialect.Neptune: + gremlin := gq.gremlinQuery() + query.gremlin = gremlin.OutE(group.InfoLabel).InV() + } + return query +} + +// Get returns a Group entity by its id. +func (gq *GroupQuery) Get(ctx context.Context, id string) (*Group, error) { + return gq.Where(group.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (gq *GroupQuery) GetX(ctx context.Context, id string) *Group { + gr, err := gq.Get(ctx, id) + if err != nil { + panic(err) + } + return gr +} + +// First returns the first Group entity in the query. Returns *ErrNotFound when no group was found. 
+func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { + grs, err := gq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(grs) == 0 { + return nil, &ErrNotFound{group.Label} + } + return grs[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (gq *GroupQuery) FirstX(ctx context.Context) *Group { + gr, err := gq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return gr +} + +// FirstID returns the first Group id in the query. Returns *ErrNotFound when no id was found. +func (gq *GroupQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = gq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{group.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (gq *GroupQuery) FirstXID(ctx context.Context) string { + id, err := gq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Group entity in the query, returns an error if not exactly one entity was returned. +func (gq *GroupQuery) Only(ctx context.Context) (*Group, error) { + grs, err := gq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(grs) { + case 1: + return grs[0], nil + case 0: + return nil, &ErrNotFound{group.Label} + default: + return nil, &ErrNotSingular{group.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (gq *GroupQuery) OnlyX(ctx context.Context) *Group { + gr, err := gq.Only(ctx) + if err != nil { + panic(err) + } + return gr +} + +// OnlyID returns the only Group id in the query, returns an error if not exactly one id was returned. 
+func (gq *GroupQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = gq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{group.Label} + default: + err = &ErrNotSingular{group.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (gq *GroupQuery) OnlyXID(ctx context.Context) string { + id, err := gq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Groups. +func (gq *GroupQuery) All(ctx context.Context) ([]*Group, error) { + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gq.sqlAll(ctx) + case dialect.Neptune: + return gq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (gq *GroupQuery) AllX(ctx context.Context) []*Group { + grs, err := gq.All(ctx) + if err != nil { + panic(err) + } + return grs +} + +// IDs executes the query and returns a list of Group ids. +func (gq *GroupQuery) IDs(ctx context.Context) ([]string, error) { + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gq.sqlIDs(ctx) + case dialect.Neptune: + return gq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (gq *GroupQuery) IDsX(ctx context.Context) []string { + ids, err := gq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (gq *GroupQuery) Count(ctx context.Context) (int, error) { + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gq.sqlCount(ctx) + case dialect.Neptune: + return gq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. 
+func (gq *GroupQuery) CountX(ctx context.Context) int { + count, err := gq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (gq *GroupQuery) Exist(ctx context.Context) (bool, error) { + switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gq.sqlExist(ctx) + case dialect.Neptune: + return gq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (gq *GroupQuery) ExistX(ctx context.Context) bool { + exist, err := gq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Active bool `json:"active,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Group.Query(). +// GroupBy(group.FieldActive). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (gq *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { + group := &GroupGroupBy{config: gq.config} + group.fields = append([]string{field}, fields...) 
+ switch gq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = gq.sqlQuery() + case dialect.Neptune: + group.gremlin = gq.gremlinQuery() + } + return group +} + +func (gq *GroupQuery) sqlAll(ctx context.Context) ([]*Group, error) { + rows := &sql.Rows{} + selector := gq.sqlQuery() + if unique := gq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := gq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var grs Groups + if err := grs.FromRows(rows); err != nil { + return nil, err + } + grs.config(gq.config) + return grs, nil +} + +func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := gq.sqlQuery() + unique := []string{group.FieldID} + if len(gq.unique) > 0 { + unique = gq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := gq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (gq *GroupQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := gq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (gq *GroupQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := gq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (gq *GroupQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(group.Table) + selector := sql.Select(t1.Columns(group.Columns...)...).From(t1) + if gq.sql != nil { + selector = gq.sql + selector.Select(selector.Columns(group.Columns...)...) 
+ } + for _, p := range gq.predicates { + p.SQL(selector) + } + for _, p := range gq.order { + p.SQL(selector) + } + if limit := gq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (gq *GroupQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := gq.gremlinQuery().Query() + if err := gq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (gq *GroupQuery) gremlinAll(ctx context.Context) ([]*Group, error) { + res := &gremlin.Response{} + query, bindings := gq.gremlinQuery().ValueMap(true).Query() + if err := gq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var grs Groups + if err := grs.FromResponse(res); err != nil { + return nil, err + } + grs.config(gq.config) + return grs, nil +} + +func (gq *GroupQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := gq.gremlinQuery().Count().Query() + if err := gq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (gq *GroupQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := gq.gremlinQuery().HasNext().Query() + if err := gq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (gq *GroupQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(group.Label) + if gq.gremlin != nil { + v = gq.gremlin.Clone() + } + for _, p := range gq.predicates { + p.Gremlin(v) + } + if len(gq.order) > 0 { + v.Order() + for _, p := range gq.order { + p.Gremlin(v) + } + } + if limit := gq.limit; limit != nil { + v.Limit(*limit) + } + if unique := gq.unique; 
len(unique) == 0 { + v.Dedup() + } + return v +} + +// GroupQuery is the builder for group-by Group entities. +type GroupGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ggb *GroupGroupBy) Aggregate(fns ...Aggregate) *GroupGroupBy { + ggb.fns = append(ggb.fns, fns...) + return ggb +} + +// Scan applies the group-by query and scan the result into the given value. +func (ggb *GroupGroupBy) Scan(ctx context.Context, v interface{}) error { + switch ggb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return ggb.sqlScan(ctx, v) + case dialect.Neptune: + return ggb.gremlinScan(ctx, v) + default: + return errors.New("ggb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (ggb *GroupGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := ggb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (ggb *GroupGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(ggb.fields) > 1 { + return nil, errors.New("ent: GroupGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := ggb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ggb *GroupGroupBy) StringsX(ctx context.Context) []string { + v, err := ggb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (ggb *GroupGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(ggb.fields) > 1 { + return nil, errors.New("ent: GroupGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := ggb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (ggb *GroupGroupBy) IntsX(ctx context.Context) []int { + v, err := ggb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (ggb *GroupGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(ggb.fields) > 1 { + return nil, errors.New("ent: GroupGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := ggb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ggb *GroupGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := ggb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (ggb *GroupGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(ggb.fields) > 1 { + return nil, errors.New("ent: GroupGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := ggb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
func (ggb *GroupGroupBy) BoolsX(ctx context.Context) []bool {
	v, err := ggb.Bools(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// sqlScan runs the group-by as SQL and scans the result rows into v.
func (ggb *GroupGroupBy) sqlScan(ctx context.Context, v interface{}) error {
	rows := &sql.Rows{}
	query, args := ggb.sqlQuery().Query()
	if err := ggb.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// sqlQuery builds the SQL group-by query: the grouped fields first,
// followed by one column per aggregation function.
func (ggb *GroupGroupBy) sqlQuery() *sql.Selector {
	selector := ggb.sql
	columns := make([]string, 0, len(ggb.fields)+len(ggb.fns))
	columns = append(columns, ggb.fields...)
	for _, fn := range ggb.fns {
		columns = append(columns, fn.SQL(selector))
	}
	return selector.Select(columns...).GroupBy(ggb.fields...)
}

// gremlinScan runs the group-by traversal and decodes the response into v.
func (ggb *GroupGroupBy) gremlinScan(ctx context.Context, v interface{}) error {
	res := &gremlin.Response{}
	query, bindings := ggb.gremlinQuery().Query()
	if err := ggb.driver.Exec(ctx, query, bindings, res); err != nil {
		return err
	}
	// A single field/aggregation decodes directly; otherwise the response
	// is a value-map keyed by field/aggregation name.
	if len(ggb.fields)+len(ggb.fns) == 1 {
		return res.ReadVal(v)
	}
	vm, err := res.ReadValueMap()
	if err != nil {
		return err
	}
	return vm.Decode(v)
}

// gremlinQuery builds the gremlin group-by traversal: group vertices by the
// folded field values, then project the aggregations and fields by name.
func (ggb *GroupGroupBy) gremlinQuery() *dsl.Traversal {
	var (
		trs   []interface{}
		names []interface{}
	)
	for _, fn := range ggb.fns {
		name, tr := fn.Gremlin("p", "")
		trs = append(trs, tr)
		names = append(names, name)
	}
	for _, f := range ggb.fields {
		names = append(names, f)
		trs = append(trs, __.As("p").Unfold().Values(f).As(f))
	}
	return ggb.gremlin.Group().
		By(__.Values(ggb.fields...).Fold()).
		By(__.Fold().Match(trs...).Select(names...)).
		Select(dsl.Values).
		Next()
}
diff --git a/entc/integration/ent/group_update.go b/entc/integration/ent/group_update.go
new file mode 100644
index 000000000..3daa2c1e0
--- /dev/null
+++ b/entc/integration/ent/group_update.go
@@ -0,0 +1,1205 @@
// Code generated (@generated) by entc, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"fbc/ent/entc/integration/ent/file"
	"fbc/ent/entc/integration/ent/group"
	"fbc/ent/entc/integration/ent/groupinfo"
	"fbc/ent/entc/integration/ent/user"

	"fbc/ent"
	"fbc/ent/dialect"
	"fbc/ent/dialect/sql"

	"fbc/lib/go/gremlin"
	"fbc/lib/go/gremlin/graph/dsl"
	"fbc/lib/go/gremlin/graph/dsl/__"
	"fbc/lib/go/gremlin/graph/dsl/g"
	"fbc/lib/go/gremlin/graph/dsl/p"
)

// GroupUpdate is the builder for updating Group entities.
type GroupUpdate struct {
	config
	// Scalar fields to update; a nil pointer means "leave unchanged".
	active    *bool
	expire    *time.Time
	_type     *string
	max_users *int
	name      *string
	// Edge ids to add, keyed by id (set semantics).
	files   map[string]struct{}
	blocked map[string]struct{}
	users   map[string]struct{}
	info    map[string]struct{}
	// Edge ids to remove, keyed by id.
	removedFiles   map[string]struct{}
	removedBlocked map[string]struct{}
	removedUsers   map[string]struct{}
	// clearedInfo marks the unique "info" edge for clearing.
	clearedInfo bool
	// predicates restrict which Group rows/vertices get updated.
	predicates []ent.Predicate
}

// Where adds a new predicate for the builder.
func (gu *GroupUpdate) Where(ps ...ent.Predicate) *GroupUpdate {
	gu.predicates = append(gu.predicates, ps...)
	return gu
}

// SetActive sets the active field.
func (gu *GroupUpdate) SetActive(b bool) *GroupUpdate {
	gu.active = &b
	return gu
}

// SetNillableActive sets the active field if the given value is not nil.
func (gu *GroupUpdate) SetNillableActive(b *bool) *GroupUpdate {
	if b != nil {
		gu.SetActive(*b)
	}
	return gu
}

// SetExpire sets the expire field.
func (gu *GroupUpdate) SetExpire(t time.Time) *GroupUpdate {
	gu.expire = &t
	return gu
}

// SetType sets the type field.
func (gu *GroupUpdate) SetType(s string) *GroupUpdate {
	gu._type = &s
	return gu
}

// SetNillableType sets the type field if the given value is not nil.
func (gu *GroupUpdate) SetNillableType(s *string) *GroupUpdate {
	if s != nil {
		gu.SetType(*s)
	}
	return gu
}

// SetMaxUsers sets the max_users field.
+func (gu *GroupUpdate) SetMaxUsers(i int) *GroupUpdate { + gu.max_users = &i + return gu +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. +func (gu *GroupUpdate) SetNillableMaxUsers(i *int) *GroupUpdate { + if i != nil { + gu.SetMaxUsers(*i) + } + return gu +} + +// SetName sets the name field. +func (gu *GroupUpdate) SetName(s string) *GroupUpdate { + gu.name = &s + return gu +} + +// AddFileIDs adds the files edge to File by ids. +func (gu *GroupUpdate) AddFileIDs(ids ...string) *GroupUpdate { + if gu.files == nil { + gu.files = make(map[string]struct{}) + } + for i := range ids { + gu.files[ids[i]] = struct{}{} + } + return gu +} + +// AddFiles adds the files edges to File. +func (gu *GroupUpdate) AddFiles(f ...*File) *GroupUpdate { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return gu.AddFileIDs(ids...) +} + +// AddBlockedIDs adds the blocked edge to User by ids. +func (gu *GroupUpdate) AddBlockedIDs(ids ...string) *GroupUpdate { + if gu.blocked == nil { + gu.blocked = make(map[string]struct{}) + } + for i := range ids { + gu.blocked[ids[i]] = struct{}{} + } + return gu +} + +// AddBlocked adds the blocked edges to User. +func (gu *GroupUpdate) AddBlocked(u ...*User) *GroupUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gu.AddBlockedIDs(ids...) +} + +// AddUserIDs adds the users edge to User by ids. +func (gu *GroupUpdate) AddUserIDs(ids ...string) *GroupUpdate { + if gu.users == nil { + gu.users = make(map[string]struct{}) + } + for i := range ids { + gu.users[ids[i]] = struct{}{} + } + return gu +} + +// AddUsers adds the users edges to User. +func (gu *GroupUpdate) AddUsers(u ...*User) *GroupUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gu.AddUserIDs(ids...) +} + +// SetInfoID sets the info edge to GroupInfo by id. 
+func (gu *GroupUpdate) SetInfoID(id string) *GroupUpdate { + if gu.info == nil { + gu.info = make(map[string]struct{}) + } + gu.info[id] = struct{}{} + return gu +} + +// SetInfo sets the info edge to GroupInfo. +func (gu *GroupUpdate) SetInfo(g *GroupInfo) *GroupUpdate { + return gu.SetInfoID(g.ID) +} + +// RemoveFileIDs removes the files edge to File by ids. +func (gu *GroupUpdate) RemoveFileIDs(ids ...string) *GroupUpdate { + if gu.removedFiles == nil { + gu.removedFiles = make(map[string]struct{}) + } + for i := range ids { + gu.removedFiles[ids[i]] = struct{}{} + } + return gu +} + +// RemoveFiles removes files edges to File. +func (gu *GroupUpdate) RemoveFiles(f ...*File) *GroupUpdate { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return gu.RemoveFileIDs(ids...) +} + +// RemoveBlockedIDs removes the blocked edge to User by ids. +func (gu *GroupUpdate) RemoveBlockedIDs(ids ...string) *GroupUpdate { + if gu.removedBlocked == nil { + gu.removedBlocked = make(map[string]struct{}) + } + for i := range ids { + gu.removedBlocked[ids[i]] = struct{}{} + } + return gu +} + +// RemoveBlocked removes blocked edges to User. +func (gu *GroupUpdate) RemoveBlocked(u ...*User) *GroupUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gu.RemoveBlockedIDs(ids...) +} + +// RemoveUserIDs removes the users edge to User by ids. +func (gu *GroupUpdate) RemoveUserIDs(ids ...string) *GroupUpdate { + if gu.removedUsers == nil { + gu.removedUsers = make(map[string]struct{}) + } + for i := range ids { + gu.removedUsers[ids[i]] = struct{}{} + } + return gu +} + +// RemoveUsers removes users edges to User. +func (gu *GroupUpdate) RemoveUsers(u ...*User) *GroupUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return gu.RemoveUserIDs(ids...) +} + +// ClearInfo clears the info edge to GroupInfo. 
func (gu *GroupUpdate) ClearInfo() *GroupUpdate {
	gu.clearedInfo = true
	return gu
}

// Save executes the query and returns the number of rows/vertices matched by this operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
	// Run the schema field validators on every field that is about to change.
	if gu._type != nil {
		if err := group.TypeValidator(*gu._type); err != nil {
			return 0, fmt.Errorf("ent: validator failed for field \"type\": %v", err)
		}
	}
	if gu.max_users != nil {
		if err := group.MaxUsersValidator(*gu.max_users); err != nil {
			return 0, fmt.Errorf("ent: validator failed for field \"max_users\": %v", err)
		}
	}
	if gu.name != nil {
		if err := group.NameValidator(*gu.name); err != nil {
			return 0, fmt.Errorf("ent: validator failed for field \"name\": %v", err)
		}
	}
	// "info" is a unique edge: reject multiple assignments, and reject a
	// clear request when no replacement assignment is present.
	if len(gu.info) > 1 {
		return 0, errors.New("ent: multiple assignments on a unique edge \"info\"")
	}
	if gu.clearedInfo && gu.info == nil {
		return 0, errors.New("ent: clearing a unique edge \"info\"")
	}
	switch gu.driver.Dialect() {
	case dialect.MySQL, dialect.SQLite:
		return gu.sqlSave(ctx)
	case dialect.Neptune:
		vertices, err := gu.gremlinSave(ctx)
		return len(vertices), err
	default:
		return 0, errors.New("ent: unsupported dialect")
	}
}

// SaveX is like Save, but panics if an error occurs.
func (gu *GroupUpdate) SaveX(ctx context.Context) int {
	affected, err := gu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (gu *GroupUpdate) Exec(ctx context.Context) error {
	_, err := gu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
+func (gu *GroupUpdate) ExecX(ctx context.Context) { + if err := gu.Exec(ctx); err != nil { + panic(err) + } +} + +func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(group.FieldID).From(sql.Table(group.Table)) + for _, p := range gu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = gu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := gu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(group.Table).Where(sql.InInts(group.FieldID, ids...)) + ) + if gu.active != nil { + update = true + builder.Set(group.FieldActive, *gu.active) + } + if gu.expire != nil { + update = true + builder.Set(group.FieldExpire, *gu.expire) + } + if gu._type != nil { + update = true + builder.Set(group.FieldType, *gu._type) + } + if gu.max_users != nil { + update = true + builder.Set(group.FieldMaxUsers, *gu.max_users) + } + if gu.name != nil { + update = true + builder.Set(group.FieldName, *gu.name) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(gu.removedFiles) > 0 { + eids := make([]int, len(gu.removedFiles)) + for eid := range gu.removedFiles { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(group.FilesTable). + SetNull(group.FilesColumn). + Where(sql.InInts(group.FilesColumn, ids...)). + Where(sql.InInts(file.FieldID, eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(gu.files) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range gu.files { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(file.FieldID, eid) + } + query, args := sql.Update(group.FilesTable). + Set(group.FilesColumn, id). + Where(sql.And(p, sql.IsNull(group.FilesColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(gu.files) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"Group\"", keys(gu.files))}) + } + } + } + if len(gu.removedBlocked) > 0 { + eids := make([]int, len(gu.removedBlocked)) + for eid := range gu.removedBlocked { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(group.BlockedTable). + SetNull(group.BlockedColumn). + Where(sql.InInts(group.BlockedColumn, ids...)). + Where(sql.InInts(user.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(gu.blocked) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range gu.blocked { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(user.FieldID, eid) + } + query, args := sql.Update(group.BlockedTable). + Set(group.BlockedColumn, id). + Where(sql.And(p, sql.IsNull(group.BlockedColumn))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(gu.blocked) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"blocked\" %v already connected to a different \"Group\"", keys(gu.blocked))}) + } + } + } + if len(gu.removedUsers) > 0 { + eids := make([]int, len(gu.removedUsers)) + for eid := range gu.removedUsers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(group.UsersTable). + Where(sql.InInts(group.UsersPrimaryKey[1], ids...)). + Where(sql.InInts(group.UsersPrimaryKey[0], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(gu.users) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range gu.users { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(group.UsersTable). + Columns(group.UsersPrimaryKey[1], group.UsersPrimaryKey[0]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if gu.clearedInfo { + query, args := sql.Update(group.InfoTable). + SetNull(group.InfoColumn). + Where(sql.InInts(groupinfo.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(gu.info) > 0 { + for eid := range gu.info { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(group.InfoTable). + Set(group.InfoColumn, eid). + Where(sql.InInts(group.FieldID, ids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (gu *GroupUpdate) gremlinSave(ctx context.Context) ([]*Group, error) { + res := &gremlin.Response{} + query, bindings := gu.gremlin().Query() + if err := gu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var grs Groups + grs.config(gu.config) + if err := grs.FromResponse(res); err != nil { + return nil, err + } + return grs, nil +} + +func (gu *GroupUpdate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. + } + constraints := make([]*constraint, 0, 2) + v := g.V().HasLabel(group.Label) + for _, p := range gu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if gu.active != nil { + v.Property(dsl.Single, group.FieldActive, *gu.active) + } + if gu.expire != nil { + v.Property(dsl.Single, group.FieldExpire, *gu.expire) + } + if gu._type != nil { + v.Property(dsl.Single, group.FieldType, *gu._type) + } + if gu.max_users != nil { + v.Property(dsl.Single, group.FieldMaxUsers, *gu.max_users) + } + if gu.name != nil { + v.Property(dsl.Single, group.FieldName, *gu.name) + } + for id := range gu.removedFiles { + tr := rv.Clone().OutE(group.FilesLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range gu.files { + v.AddE(group.FilesLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.FilesLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.FilesLabel, id)), + }) + } + for id := range gu.removedBlocked { + tr := rv.Clone().OutE(group.BlockedLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = 
append(trs, tr) + } + for id := range gu.blocked { + v.AddE(group.BlockedLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.BlockedLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.BlockedLabel, id)), + }) + } + for id := range gu.removedUsers { + tr := rv.Clone().InE(user.GroupsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range gu.users { + v.AddE(user.GroupsLabel).From(g.V(id)).InV() + } + if gu.clearedInfo { + tr := rv.Clone().OutE(group.InfoLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range gu.info { + v.AddE(group.InfoLabel).To(g.V(id)).OutV() + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// GroupUpdateOne is the builder for updating a single Group entity. +type GroupUpdateOne struct { + config + id string + active *bool + expire *time.Time + _type *string + max_users *int + name *string + files map[string]struct{} + blocked map[string]struct{} + users map[string]struct{} + info map[string]struct{} + removedFiles map[string]struct{} + removedBlocked map[string]struct{} + removedUsers map[string]struct{} + clearedInfo bool +} + +// SetActive sets the active field. +func (guo *GroupUpdateOne) SetActive(b bool) *GroupUpdateOne { + guo.active = &b + return guo +} + +// SetNillableActive sets the active field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableActive(b *bool) *GroupUpdateOne { + if b != nil { + guo.SetActive(*b) + } + return guo +} + +// SetExpire sets the expire field. 
+func (guo *GroupUpdateOne) SetExpire(t time.Time) *GroupUpdateOne { + guo.expire = &t + return guo +} + +// SetType sets the type field. +func (guo *GroupUpdateOne) SetType(s string) *GroupUpdateOne { + guo._type = &s + return guo +} + +// SetNillableType sets the type field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableType(s *string) *GroupUpdateOne { + if s != nil { + guo.SetType(*s) + } + return guo +} + +// SetMaxUsers sets the max_users field. +func (guo *GroupUpdateOne) SetMaxUsers(i int) *GroupUpdateOne { + guo.max_users = &i + return guo +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableMaxUsers(i *int) *GroupUpdateOne { + if i != nil { + guo.SetMaxUsers(*i) + } + return guo +} + +// SetName sets the name field. +func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne { + guo.name = &s + return guo +} + +// AddFileIDs adds the files edge to File by ids. +func (guo *GroupUpdateOne) AddFileIDs(ids ...string) *GroupUpdateOne { + if guo.files == nil { + guo.files = make(map[string]struct{}) + } + for i := range ids { + guo.files[ids[i]] = struct{}{} + } + return guo +} + +// AddFiles adds the files edges to File. +func (guo *GroupUpdateOne) AddFiles(f ...*File) *GroupUpdateOne { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return guo.AddFileIDs(ids...) +} + +// AddBlockedIDs adds the blocked edge to User by ids. +func (guo *GroupUpdateOne) AddBlockedIDs(ids ...string) *GroupUpdateOne { + if guo.blocked == nil { + guo.blocked = make(map[string]struct{}) + } + for i := range ids { + guo.blocked[ids[i]] = struct{}{} + } + return guo +} + +// AddBlocked adds the blocked edges to User. +func (guo *GroupUpdateOne) AddBlocked(u ...*User) *GroupUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return guo.AddBlockedIDs(ids...) +} + +// AddUserIDs adds the users edge to User by ids. 
+func (guo *GroupUpdateOne) AddUserIDs(ids ...string) *GroupUpdateOne { + if guo.users == nil { + guo.users = make(map[string]struct{}) + } + for i := range ids { + guo.users[ids[i]] = struct{}{} + } + return guo +} + +// AddUsers adds the users edges to User. +func (guo *GroupUpdateOne) AddUsers(u ...*User) *GroupUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return guo.AddUserIDs(ids...) +} + +// SetInfoID sets the info edge to GroupInfo by id. +func (guo *GroupUpdateOne) SetInfoID(id string) *GroupUpdateOne { + if guo.info == nil { + guo.info = make(map[string]struct{}) + } + guo.info[id] = struct{}{} + return guo +} + +// SetInfo sets the info edge to GroupInfo. +func (guo *GroupUpdateOne) SetInfo(g *GroupInfo) *GroupUpdateOne { + return guo.SetInfoID(g.ID) +} + +// RemoveFileIDs removes the files edge to File by ids. +func (guo *GroupUpdateOne) RemoveFileIDs(ids ...string) *GroupUpdateOne { + if guo.removedFiles == nil { + guo.removedFiles = make(map[string]struct{}) + } + for i := range ids { + guo.removedFiles[ids[i]] = struct{}{} + } + return guo +} + +// RemoveFiles removes files edges to File. +func (guo *GroupUpdateOne) RemoveFiles(f ...*File) *GroupUpdateOne { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return guo.RemoveFileIDs(ids...) +} + +// RemoveBlockedIDs removes the blocked edge to User by ids. +func (guo *GroupUpdateOne) RemoveBlockedIDs(ids ...string) *GroupUpdateOne { + if guo.removedBlocked == nil { + guo.removedBlocked = make(map[string]struct{}) + } + for i := range ids { + guo.removedBlocked[ids[i]] = struct{}{} + } + return guo +} + +// RemoveBlocked removes blocked edges to User. +func (guo *GroupUpdateOne) RemoveBlocked(u ...*User) *GroupUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return guo.RemoveBlockedIDs(ids...) +} + +// RemoveUserIDs removes the users edge to User by ids. 
func (guo *GroupUpdateOne) RemoveUserIDs(ids ...string) *GroupUpdateOne {
	if guo.removedUsers == nil {
		guo.removedUsers = make(map[string]struct{})
	}
	for i := range ids {
		guo.removedUsers[ids[i]] = struct{}{}
	}
	return guo
}

// RemoveUsers removes users edges to User.
func (guo *GroupUpdateOne) RemoveUsers(u ...*User) *GroupUpdateOne {
	ids := make([]string, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return guo.RemoveUserIDs(ids...)
}

// ClearInfo clears the info edge to GroupInfo.
func (guo *GroupUpdateOne) ClearInfo() *GroupUpdateOne {
	guo.clearedInfo = true
	return guo
}

// Save executes the query and returns the updated entity.
func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
	// Run the schema field validators on every field that is about to change.
	if guo._type != nil {
		if err := group.TypeValidator(*guo._type); err != nil {
			return nil, fmt.Errorf("ent: validator failed for field \"type\": %v", err)
		}
	}
	if guo.max_users != nil {
		if err := group.MaxUsersValidator(*guo.max_users); err != nil {
			return nil, fmt.Errorf("ent: validator failed for field \"max_users\": %v", err)
		}
	}
	if guo.name != nil {
		if err := group.NameValidator(*guo.name); err != nil {
			return nil, fmt.Errorf("ent: validator failed for field \"name\": %v", err)
		}
	}
	// "info" is a unique edge: reject multiple assignments, and reject a
	// clear request when no replacement assignment is present.
	if len(guo.info) > 1 {
		return nil, errors.New("ent: multiple assignments on a unique edge \"info\"")
	}
	if guo.clearedInfo && guo.info == nil {
		return nil, errors.New("ent: clearing a unique edge \"info\"")
	}
	switch guo.driver.Dialect() {
	case dialect.MySQL, dialect.SQLite:
		return guo.sqlSave(ctx)
	case dialect.Neptune:
		return guo.gremlinSave(ctx)
	default:
		return nil, errors.New("ent: unsupported dialect")
	}
}

// SaveX is like Save, but panics if an error occurs.
func (guo *GroupUpdateOne) SaveX(ctx context.Context) *Group {
	gr, err := guo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return gr
}

// Exec executes the query on the entity.
+func (guo *GroupUpdateOne) Exec(ctx context.Context) error { + _, err := guo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (guo *GroupUpdateOne) ExecX(ctx context.Context) { + if err := guo.Exec(ctx); err != nil { + panic(err) + } +} + +func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (gr *Group, err error) { + selector := sql.Select(group.Columns...).From(sql.Table(group.Table)) + group.ID(guo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = guo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + gr = &Group{config: guo.config} + if err := gr.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into Group: %v", err) + } + id = gr.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: Group not found with id: %v", guo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one Group with the same id: %v", guo.id) + } + + tx, err := guo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = sql.Update(group.Table).Where(sql.InInts(group.FieldID, ids...)) + ) + if guo.active != nil { + update = true + builder.Set(group.FieldActive, *guo.active) + gr.Active = *guo.active + } + if guo.expire != nil { + update = true + builder.Set(group.FieldExpire, *guo.expire) + gr.Expire = *guo.expire + } + if guo._type != nil { + update = true + builder.Set(group.FieldType, *guo._type) + *gr.Type = *guo._type + } + if guo.max_users != nil { + update = true + builder.Set(group.FieldMaxUsers, *guo.max_users) + gr.MaxUsers = *guo.max_users + } + if guo.name != nil { + update = true + builder.Set(group.FieldName, *guo.name) + gr.Name = *guo.name + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, 
err) + } + } + if len(guo.removedFiles) > 0 { + eids := make([]int, len(guo.removedFiles)) + for eid := range guo.removedFiles { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(group.FilesTable). + SetNull(group.FilesColumn). + Where(sql.InInts(group.FilesColumn, ids...)). + Where(sql.InInts(file.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(guo.files) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range guo.files { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(file.FieldID, eid) + } + query, args := sql.Update(group.FilesTable). + Set(group.FilesColumn, id). + Where(sql.And(p, sql.IsNull(group.FilesColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(guo.files) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"Group\"", keys(guo.files))}) + } + } + } + if len(guo.removedBlocked) > 0 { + eids := make([]int, len(guo.removedBlocked)) + for eid := range guo.removedBlocked { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(group.BlockedTable). + SetNull(group.BlockedColumn). + Where(sql.InInts(group.BlockedColumn, ids...)). + Where(sql.InInts(user.FieldID, eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(guo.blocked) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range guo.blocked { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(user.FieldID, eid) + } + query, args := sql.Update(group.BlockedTable). + Set(group.BlockedColumn, id). + Where(sql.And(p, sql.IsNull(group.BlockedColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(guo.blocked) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"blocked\" %v already connected to a different \"Group\"", keys(guo.blocked))}) + } + } + } + if len(guo.removedUsers) > 0 { + eids := make([]int, len(guo.removedUsers)) + for eid := range guo.removedUsers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(group.UsersTable). + Where(sql.InInts(group.UsersPrimaryKey[1], ids...)). + Where(sql.InInts(group.UsersPrimaryKey[0], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(guo.users) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range guo.users { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(group.UsersTable). + Columns(group.UsersPrimaryKey[1], group.UsersPrimaryKey[0]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if guo.clearedInfo { + query, args := sql.Update(group.InfoTable). + SetNull(group.InfoColumn). 
+ Where(sql.InInts(groupinfo.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(guo.info) > 0 { + for eid := range guo.info { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(group.InfoTable). + Set(group.InfoColumn, eid). + Where(sql.InInts(group.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return gr, nil +} + +func (guo *GroupUpdateOne) gremlinSave(ctx context.Context) (*Group, error) { + res := &gremlin.Response{} + query, bindings := guo.gremlin(guo.id).Query() + if err := guo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + gr := &Group{config: guo.config} + if err := gr.FromResponse(res); err != nil { + return nil, err + } + return gr, nil +} + +func (guo *GroupUpdateOne) gremlin(id string) *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 2) + v := g.V(id) + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if guo.active != nil { + v.Property(dsl.Single, group.FieldActive, *guo.active) + } + if guo.expire != nil { + v.Property(dsl.Single, group.FieldExpire, *guo.expire) + } + if guo._type != nil { + v.Property(dsl.Single, group.FieldType, *guo._type) + } + if guo.max_users != nil { + v.Property(dsl.Single, group.FieldMaxUsers, *guo.max_users) + } + if guo.name != nil { + v.Property(dsl.Single, group.FieldName, *guo.name) + } + for id := range guo.removedFiles { + tr := rv.Clone().OutE(group.FilesLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range guo.files { + v.AddE(group.FilesLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.FilesLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.FilesLabel, id)), + }) + } + for id := range guo.removedBlocked { + tr := rv.Clone().OutE(group.BlockedLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range guo.blocked { + v.AddE(group.BlockedLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.BlockedLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(group.Label, group.BlockedLabel, id)), + }) + } + for id := range guo.removedUsers { + tr := rv.Clone().InE(user.GroupsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range guo.users { + v.AddE(user.GroupsLabel).From(g.V(id)).InV() + } + if guo.clearedInfo { + tr := rv.Clone().OutE(group.InfoLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range guo.info { + v.AddE(group.InfoLabel).To(g.V(id)).OutV() + } + v.ValueMap(true) + if len(constraints) > 0 { + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { 
+ v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/groupinfo.go b/entc/integration/ent/groupinfo.go new file mode 100644 index 000000000..69c4a47af --- /dev/null +++ b/entc/integration/ent/groupinfo.go @@ -0,0 +1,151 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// GroupInfo is the model entity for the GroupInfo schema. +type GroupInfo struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Desc holds the value of the "desc" field. + Desc string `json:"desc,omitempty"` + // MaxUsers holds the value of the "max_users" field. + MaxUsers int `json:"max_users,omitempty"` +} + +// FromResponse scans the gremlin response data into GroupInfo. +func (gi *GroupInfo) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vgi struct { + ID string `json:"id,omitempty"` + Desc string `json:"desc,omitempty"` + MaxUsers int `json:"max_users,omitempty"` + } + if err := vmap.Decode(&vgi); err != nil { + return err + } + gi.ID = vgi.ID + gi.Desc = vgi.Desc + gi.MaxUsers = vgi.MaxUsers + return nil +} + +// FromRows scans the sql response data into GroupInfo. +func (gi *GroupInfo) FromRows(rows *sql.Rows) error { + var vgi struct { + ID int + Desc string + MaxUsers int + } + // the order here should be the same as in the `groupinfo.Columns`. + if err := rows.Scan( + &vgi.ID, + &vgi.Desc, + &vgi.MaxUsers, + ); err != nil { + return err + } + gi.ID = strconv.Itoa(vgi.ID) + gi.Desc = vgi.Desc + gi.MaxUsers = vgi.MaxUsers + return nil +} + +// QueryGroups queries the groups edge of the GroupInfo. +func (gi *GroupInfo) QueryGroups() *GroupQuery { + return (&GroupInfoClient{gi.config}).QueryGroups(gi) +} + +// Update returns a builder for updating this GroupInfo. 
// Note that you need to call GroupInfo.Unwrap() before calling this method if this GroupInfo
// was returned from a transaction, and the transaction was committed or rolled back.
func (gi *GroupInfo) Update() *GroupInfoUpdateOne {
	return (&GroupInfoClient{gi.config}).UpdateOne(gi)
}

// Unwrap unwraps the entity that was returned from a transaction after it was closed,
// so that all next queries will be executed through the driver which created the transaction.
// It panics if the entity is not bound to a transactional driver.
func (gi *GroupInfo) Unwrap() *GroupInfo {
	tx, ok := gi.config.driver.(*txDriver)
	if !ok {
		panic("ent: GroupInfo is not a transactional entity")
	}
	gi.config.driver = tx.drv
	return gi
}

// String implements the fmt.Stringer.
func (gi *GroupInfo) String() string {
	buf := bytes.NewBuffer(nil)
	buf.WriteString("GroupInfo(")
	buf.WriteString(fmt.Sprintf("id=%v,", gi.ID))
	buf.WriteString(fmt.Sprintf("desc=%v", gi.Desc))
	buf.WriteString(", ")
	buf.WriteString(fmt.Sprintf("max_users=%v", gi.MaxUsers))
	buf.WriteString(")")
	return buf.String()
}

// id returns the int representation of the ID field.
// Conversion errors are ignored; a non-numeric ID yields 0.
func (gi *GroupInfo) id() int {
	id, _ := strconv.Atoi(gi.ID)
	return id
}

// GroupInfos is a parsable slice of GroupInfo.
type GroupInfos []*GroupInfo

// FromResponse scans the gremlin response data into GroupInfos.
func (gi *GroupInfos) FromResponse(res *gremlin.Response) error {
	vmap, err := res.ReadValueMap()
	if err != nil {
		return err
	}
	var vgi []struct {
		ID       string `json:"id,omitempty"`
		Desc     string `json:"desc,omitempty"`
		MaxUsers int    `json:"max_users,omitempty"`
	}
	if err := vmap.Decode(&vgi); err != nil {
		return err
	}
	for _, v := range vgi {
		*gi = append(*gi, &GroupInfo{
			ID:       v.ID,
			Desc:     v.Desc,
			MaxUsers: v.MaxUsers,
		})
	}
	return nil
}

// FromRows scans the sql response data into GroupInfos.
func (gi *GroupInfos) FromRows(rows *sql.Rows) error {
	for rows.Next() {
		vgi := &GroupInfo{}
		if err := vgi.FromRows(rows); err != nil {
			return err
		}
		*gi = append(*gi, vgi)
	}
	return nil
}

// config binds the given configuration to every GroupInfo in the slice.
func (gi GroupInfos) config(cfg config) {
	for i := range gi {
		gi[i].config = cfg
	}
}
diff --git a/entc/integration/ent/groupinfo/groupinfo.go b/entc/integration/ent/groupinfo/groupinfo.go
new file mode 100644
index 000000000..f0fc885c0
--- /dev/null
+++ b/entc/integration/ent/groupinfo/groupinfo.go
@@ -0,0 +1,34 @@
// Code generated (@generated) by entc, DO NOT EDIT.

package groupinfo

const (
	// Label holds the string label denoting the groupinfo type in the database.
	Label = "group_info"
	// GroupsInverseLabel holds the string label denoting the groups inverse edge type in the database.
	GroupsInverseLabel = "group_info"
	// FieldDesc holds the string denoting the desc vertex property in the database.
	FieldDesc = "desc"
	// FieldMaxUsers holds the string denoting the max_users vertex property in the database.
	FieldMaxUsers = "max_users"
	// DefaultMaxUsers holds the default value for the max_users field.
	DefaultMaxUsers int = 10000
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// Table holds the table name of the groupinfo in the database.
	Table = "group_infos"
	// GroupsTable is the table that holds the groups relation/edge.
	GroupsTable = "groups"
	// GroupsInverseTable is the table name for the Group entity.
	// It exists in this package in order to avoid circular dependency with the "group" package.
	GroupsInverseTable = "groups"
	// GroupsColumn is the table column denoting the groups relation/edge.
	GroupsColumn = "info_id"
)

// Columns holds all SQL columns for groupinfo fields.
+var Columns = []string{ + FieldID, + FieldDesc, + FieldMaxUsers, +} diff --git a/entc/integration/ent/groupinfo/where.go b/entc/integration/ent/groupinfo/where.go new file mode 100644 index 000000000..8b890473b --- /dev/null +++ b/entc/integration/ent/groupinfo/where.go @@ -0,0 +1,490 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package groupinfo + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Desc applies equality check predicate on the "desc" field. It's identical to DescEQ. 
+func Desc(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.EQ(v)) + }, + } +} + +// MaxUsers applies equality check predicate on the "max_users" field. It's identical to MaxUsersEQ. +func MaxUsers(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.EQ(v)) + }, + } +} + +// DescEQ applies the EQ predicate on the "desc" field. +func DescEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.EQ(v)) + }, + } +} + +// DescNEQ applies the NEQ predicate on the "desc" field. +func DescNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.NEQ(v)) + }, + } +} + +// DescGT applies the GT predicate on the "desc" field. +func DescGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.GT(v)) + }, + } +} + +// DescGTE applies the GTE predicate on the "desc" field. +func DescGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.GTE(v)) + }, + } +} + +// DescLT applies the LT predicate on the "desc" field. 
+func DescLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.LT(v)) + }, + } +} + +// DescLTE applies the LTE predicate on the "desc" field. +func DescLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.LTE(v)) + }, + } +} + +// DescIn applies the In predicate on the "desc" field. +func DescIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldDesc), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.Within(v...)) + }, + } +} + +// DescNotIn applies the NotIn predicate on the "desc" field. +func DescNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldDesc), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.Without(v...)) + }, + } +} + +// DescContains applies the Contains predicate on the "desc" field. 
+func DescContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.Containing(v)) + }, + } +} + +// DescHasPrefix applies the HasPrefix predicate on the "desc" field. +func DescHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.StartingWith(v)) + }, + } +} + +// DescHasSuffix applies the HasSuffix predicate on the "desc" field. +func DescHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldDesc), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldDesc, p.EndingWith(v)) + }, + } +} + +// MaxUsersEQ applies the EQ predicate on the "max_users" field. +func MaxUsersEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.EQ(v)) + }, + } +} + +// MaxUsersNEQ applies the NEQ predicate on the "max_users" field. +func MaxUsersNEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.NEQ(v)) + }, + } +} + +// MaxUsersGT applies the GT predicate on the "max_users" field. +func MaxUsersGT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.GT(v)) + }, + } +} + +// MaxUsersGTE applies the GTE predicate on the "max_users" field. 
+func MaxUsersGTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.GTE(v)) + }, + } +} + +// MaxUsersLT applies the LT predicate on the "max_users" field. +func MaxUsersLT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.LT(v)) + }, + } +} + +// MaxUsersLTE applies the LTE predicate on the "max_users" field. +func MaxUsersLTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldMaxUsers), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.LTE(v)) + }, + } +} + +// MaxUsersIn applies the In predicate on the "max_users" field. +func MaxUsersIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldMaxUsers), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.Within(v...)) + }, + } +} + +// MaxUsersNotIn applies the NotIn predicate on the "max_users" field. +func MaxUsersNotIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldMaxUsers), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldMaxUsers, p.Without(v...)) + }, + } +} + +// HasGroups applies the HasEdge predicate on the "groups" edge. +func HasGroups() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where( + sql.In( + t1.C(FieldID), + sql.Select(GroupsColumn). + From(sql.Table(GroupsTable)). + Where(sql.NotNull(GroupsColumn)), + ), + ) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(GroupsInverseLabel).InV() + }, + } +} + +// HasGroupsWith applies the HasEdge predicate on the "groups" edge with a given conditions (other predicates). +func HasGroupsWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(GroupsColumn).From(sql.Table(GroupsTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(FieldID), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(GroupsInverseLabel).Where(tr).InV() + }, + } +} diff --git a/entc/integration/ent/groupinfo_create.go b/entc/integration/ent/groupinfo_create.go new file mode 100644 index 000000000..0e744d15b --- /dev/null +++ b/entc/integration/ent/groupinfo_create.go @@ -0,0 +1,201 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// GroupInfoCreate is the builder for creating a GroupInfo entity. 
+type GroupInfoCreate struct { + config + desc *string + max_users *int + groups map[string]struct{} +} + +// SetDesc sets the desc field. +func (gic *GroupInfoCreate) SetDesc(s string) *GroupInfoCreate { + gic.desc = &s + return gic +} + +// SetMaxUsers sets the max_users field. +func (gic *GroupInfoCreate) SetMaxUsers(i int) *GroupInfoCreate { + gic.max_users = &i + return gic +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. +func (gic *GroupInfoCreate) SetNillableMaxUsers(i *int) *GroupInfoCreate { + if i != nil { + gic.SetMaxUsers(*i) + } + return gic +} + +// AddGroupIDs adds the groups edge to Group by ids. +func (gic *GroupInfoCreate) AddGroupIDs(ids ...string) *GroupInfoCreate { + if gic.groups == nil { + gic.groups = make(map[string]struct{}) + } + for i := range ids { + gic.groups[ids[i]] = struct{}{} + } + return gic +} + +// AddGroups adds the groups edges to Group. +func (gic *GroupInfoCreate) AddGroups(g ...*Group) *GroupInfoCreate { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return gic.AddGroupIDs(ids...) +} + +// Save creates the GroupInfo in the database. +func (gic *GroupInfoCreate) Save(ctx context.Context) (*GroupInfo, error) { + if gic.desc == nil { + return nil, errors.New("ent: missing required field \"desc\"") + } + if gic.max_users == nil { + v := groupinfo.DefaultMaxUsers + gic.max_users = &v + } + switch gic.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gic.sqlSave(ctx) + case dialect.Neptune: + return gic.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. 
+func (gic *GroupInfoCreate) SaveX(ctx context.Context) *GroupInfo { + v, err := gic.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (gic *GroupInfoCreate) sqlSave(ctx context.Context) (*GroupInfo, error) { + var ( + res sql.Result + gi = &GroupInfo{config: gic.config} + ) + tx, err := gic.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(groupinfo.Table).Default(gic.driver.Dialect()) + if gic.desc != nil { + builder.Set(groupinfo.FieldDesc, *gic.desc) + gi.Desc = *gic.desc + } + if gic.max_users != nil { + builder.Set(groupinfo.FieldMaxUsers, *gic.max_users) + gi.MaxUsers = *gic.max_users + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + gi.ID = strconv.FormatInt(id, 10) + if len(gic.groups) > 0 { + p := sql.P() + for eid := range gic.groups { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + p.Or().EQ(group.FieldID, eid) + } + query, args := sql.Update(groupinfo.GroupsTable). + Set(groupinfo.GroupsColumn, id). + Where(sql.And(p, sql.IsNull(groupinfo.GroupsColumn))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(gic.groups) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"groups\" %v already connected to a different \"GroupInfo\"", keys(gic.groups))}) + } + } + if err := tx.Commit(); err != nil { + return nil, err + } + return gi, nil +} + +func (gic *GroupInfoCreate) gremlinSave(ctx context.Context) (*GroupInfo, error) { + res := &gremlin.Response{} + query, bindings := gic.gremlin().Query() + if err := gic.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + gi := &GroupInfo{config: gic.config} + if err := gi.FromResponse(res); err != nil { + return nil, err + } + return gi, nil +} + +func (gic *GroupInfoCreate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.AddV(groupinfo.Label) + if gic.desc != nil { + v.Property(dsl.Single, groupinfo.FieldDesc, *gic.desc) + } + if gic.max_users != nil { + v.Property(dsl.Single, groupinfo.FieldMaxUsers, *gic.max_users) + } + for id := range gic.groups { + v.AddE(group.InfoLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.InfoLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(groupinfo.Label, group.InfoLabel, id)), + }) + } + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr +} diff --git a/entc/integration/ent/groupinfo_delete.go b/entc/integration/ent/groupinfo_delete.go new file mode 100644 index 000000000..80d9d78f6 --- /dev/null +++ b/entc/integration/ent/groupinfo_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/groupinfo" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// GroupInfoDelete is the builder for deleting a GroupInfo entity. +type GroupInfoDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (gid *GroupInfoDelete) Where(ps ...ent.Predicate) *GroupInfoDelete { + gid.predicates = append(gid.predicates, ps...) + return gid +} + +// Exec executes the deletion query. 
+func (gid *GroupInfoDelete) Exec(ctx context.Context) error { + switch gid.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gid.sqlExec(ctx) + case dialect.Neptune: + return gid.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (gid *GroupInfoDelete) ExecX(ctx context.Context) { + if err := gid.Exec(ctx); err != nil { + panic(err) + } +} + +func (gid *GroupInfoDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(groupinfo.Table)) + for _, p := range gid.predicates { + p.SQL(selector) + } + query, args := sql.Delete(groupinfo.Table).FromSelect(selector).Query() + return gid.driver.Exec(ctx, query, args, &res) +} + +func (gid *GroupInfoDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := gid.gremlin().Query() + return gid.driver.Exec(ctx, query, bindings, res) +} + +func (gid *GroupInfoDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(groupinfo.Label) + for _, p := range gid.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// GroupInfoDeleteOne is the builder for deleting a single GroupInfo entity. +type GroupInfoDeleteOne struct { + gid *GroupInfoDelete +} + +// Exec executes the deletion query. +func (gido *GroupInfoDeleteOne) Exec(ctx context.Context) error { + return gido.gid.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (gido *GroupInfoDeleteOne) ExecX(ctx context.Context) { + gido.gid.ExecX(ctx) +} diff --git a/entc/integration/ent/groupinfo_query.go b/entc/integration/ent/groupinfo_query.go new file mode 100644 index 000000000..070aef675 --- /dev/null +++ b/entc/integration/ent/groupinfo_query.go @@ -0,0 +1,618 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// GroupInfoQuery is the builder for querying GroupInfo entities. +type GroupInfoQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (giq *GroupInfoQuery) Where(ps ...ent.Predicate) *GroupInfoQuery { + giq.predicates = append(giq.predicates, ps...) + return giq +} + +// Limit adds a limit step to the query. +func (giq *GroupInfoQuery) Limit(limit int) *GroupInfoQuery { + giq.limit = &limit + return giq +} + +// Order adds an order step to the query. +func (giq *GroupInfoQuery) Order(o ...Order) *GroupInfoQuery { + giq.order = append(giq.order, o...) + return giq +} + +// QueryGroups chains the current query on the groups edge. +func (giq *GroupInfoQuery) QueryGroups() *GroupQuery { + query := &GroupQuery{config: giq.config} + switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(group.Table) + t2 := giq.sqlQuery() + t2.Select(t2.C(groupinfo.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(groupinfo.GroupsColumn), t2.C(groupinfo.FieldID)) + case dialect.Neptune: + gremlin := giq.gremlinQuery() + query.gremlin = gremlin.InE(group.InfoLabel).OutV() + } + return query +} + +// Get returns a GroupInfo entity by its id. +func (giq *GroupInfoQuery) Get(ctx context.Context, id string) (*GroupInfo, error) { + return giq.Where(groupinfo.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (giq *GroupInfoQuery) GetX(ctx context.Context, id string) *GroupInfo { + gi, err := giq.Get(ctx, id) + if err != nil { + panic(err) + } + return gi +} + +// First returns the first GroupInfo entity in the query. Returns *ErrNotFound when no groupinfo was found. +func (giq *GroupInfoQuery) First(ctx context.Context) (*GroupInfo, error) { + gis, err := giq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(gis) == 0 { + return nil, &ErrNotFound{groupinfo.Label} + } + return gis[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (giq *GroupInfoQuery) FirstX(ctx context.Context) *GroupInfo { + gi, err := giq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return gi +} + +// FirstID returns the first GroupInfo id in the query. Returns *ErrNotFound when no id was found. +func (giq *GroupInfoQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = giq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{groupinfo.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (giq *GroupInfoQuery) FirstXID(ctx context.Context) string { + id, err := giq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only GroupInfo entity in the query, returns an error if not exactly one entity was returned. +func (giq *GroupInfoQuery) Only(ctx context.Context) (*GroupInfo, error) { + gis, err := giq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(gis) { + case 1: + return gis[0], nil + case 0: + return nil, &ErrNotFound{groupinfo.Label} + default: + return nil, &ErrNotSingular{groupinfo.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (giq *GroupInfoQuery) OnlyX(ctx context.Context) *GroupInfo { + gi, err := giq.Only(ctx) + if err != nil { + panic(err) + } + return gi +} + +// OnlyID returns the only GroupInfo id in the query, returns an error if not exactly one id was returned. +func (giq *GroupInfoQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = giq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{groupinfo.Label} + default: + err = &ErrNotSingular{groupinfo.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (giq *GroupInfoQuery) OnlyXID(ctx context.Context) string { + id, err := giq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of GroupInfos. +func (giq *GroupInfoQuery) All(ctx context.Context) ([]*GroupInfo, error) { + switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giq.sqlAll(ctx) + case dialect.Neptune: + return giq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (giq *GroupInfoQuery) AllX(ctx context.Context) []*GroupInfo { + gis, err := giq.All(ctx) + if err != nil { + panic(err) + } + return gis +} + +// IDs executes the query and returns a list of GroupInfo ids. +func (giq *GroupInfoQuery) IDs(ctx context.Context) ([]string, error) { + switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giq.sqlIDs(ctx) + case dialect.Neptune: + return giq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (giq *GroupInfoQuery) IDsX(ctx context.Context) []string { + ids, err := giq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (giq *GroupInfoQuery) Count(ctx context.Context) (int, error) { + switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giq.sqlCount(ctx) + case dialect.Neptune: + return giq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (giq *GroupInfoQuery) CountX(ctx context.Context) int { + count, err := giq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (giq *GroupInfoQuery) Exist(ctx context.Context) (bool, error) { + switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giq.sqlExist(ctx) + case dialect.Neptune: + return giq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (giq *GroupInfoQuery) ExistX(ctx context.Context) bool { + exist, err := giq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Desc string `json:"desc,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.GroupInfo.Query(). +// GroupBy(groupinfo.FieldDesc). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (giq *GroupInfoQuery) GroupBy(field string, fields ...string) *GroupInfoGroupBy { + group := &GroupInfoGroupBy{config: giq.config} + group.fields = append([]string{field}, fields...) 
+ switch giq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = giq.sqlQuery() + case dialect.Neptune: + group.gremlin = giq.gremlinQuery() + } + return group +} + +func (giq *GroupInfoQuery) sqlAll(ctx context.Context) ([]*GroupInfo, error) { + rows := &sql.Rows{} + selector := giq.sqlQuery() + if unique := giq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := giq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var gis GroupInfos + if err := gis.FromRows(rows); err != nil { + return nil, err + } + gis.config(giq.config) + return gis, nil +} + +func (giq *GroupInfoQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := giq.sqlQuery() + unique := []string{groupinfo.FieldID} + if len(giq.unique) > 0 { + unique = giq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := giq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (giq *GroupInfoQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := giq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (giq *GroupInfoQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := giq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (giq *GroupInfoQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(groupinfo.Table) + selector := sql.Select(t1.Columns(groupinfo.Columns...)...).From(t1) + if giq.sql != nil { + selector = giq.sql + 
selector.Select(selector.Columns(groupinfo.Columns...)...) + } + for _, p := range giq.predicates { + p.SQL(selector) + } + for _, p := range giq.order { + p.SQL(selector) + } + if limit := giq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (giq *GroupInfoQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := giq.gremlinQuery().Query() + if err := giq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (giq *GroupInfoQuery) gremlinAll(ctx context.Context) ([]*GroupInfo, error) { + res := &gremlin.Response{} + query, bindings := giq.gremlinQuery().ValueMap(true).Query() + if err := giq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var gis GroupInfos + if err := gis.FromResponse(res); err != nil { + return nil, err + } + gis.config(giq.config) + return gis, nil +} + +func (giq *GroupInfoQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := giq.gremlinQuery().Count().Query() + if err := giq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (giq *GroupInfoQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := giq.gremlinQuery().HasNext().Query() + if err := giq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (giq *GroupInfoQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(groupinfo.Label) + if giq.gremlin != nil { + v = giq.gremlin.Clone() + } + for _, p := range giq.predicates { + p.Gremlin(v) + } + if len(giq.order) > 0 { + v.Order() + for _, p := range giq.order { + 
p.Gremlin(v) + } + } + if limit := giq.limit; limit != nil { + v.Limit(*limit) + } + if unique := giq.unique; len(unique) == 0 { + v.Dedup() + } + return v +} + +// GroupInfoQuery is the builder for group-by GroupInfo entities. +type GroupInfoGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (gigb *GroupInfoGroupBy) Aggregate(fns ...Aggregate) *GroupInfoGroupBy { + gigb.fns = append(gigb.fns, fns...) + return gigb +} + +// Scan applies the group-by query and scan the result into the given value. +func (gigb *GroupInfoGroupBy) Scan(ctx context.Context, v interface{}) error { + switch gigb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return gigb.sqlScan(ctx, v) + case dialect.Neptune: + return gigb.gremlinScan(ctx, v) + default: + return errors.New("gigb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (gigb *GroupInfoGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := gigb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (gigb *GroupInfoGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(gigb.fields) > 1 { + return nil, errors.New("ent: GroupInfoGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := gigb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (gigb *GroupInfoGroupBy) StringsX(ctx context.Context) []string { + v, err := gigb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (gigb *GroupInfoGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(gigb.fields) > 1 { + return nil, errors.New("ent: GroupInfoGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := gigb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (gigb *GroupInfoGroupBy) IntsX(ctx context.Context) []int { + v, err := gigb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (gigb *GroupInfoGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(gigb.fields) > 1 { + return nil, errors.New("ent: GroupInfoGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := gigb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (gigb *GroupInfoGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := gigb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (gigb *GroupInfoGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(gigb.fields) > 1 { + return nil, errors.New("ent: GroupInfoGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := gigb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (gigb *GroupInfoGroupBy) BoolsX(ctx context.Context) []bool { + v, err := gigb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (gigb *GroupInfoGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := gigb.sqlQuery().Query() + if err := gigb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (gigb *GroupInfoGroupBy) sqlQuery() *sql.Selector { + selector := gigb.sql + columns := make([]string, 0, len(gigb.fields)+len(gigb.fns)) + columns = append(columns, gigb.fields...) + for _, fn := range gigb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(gigb.fields...) +} + +func (gigb *GroupInfoGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := gigb.gremlinQuery().Query() + if err := gigb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(gigb.fields)+len(gigb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (gigb *GroupInfoGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range gigb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range gigb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return gigb.gremlin.Group(). + By(__.Values(gigb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/groupinfo_update.go b/entc/integration/ent/groupinfo_update.go new file mode 100644 index 000000000..6f56914c8 --- /dev/null +++ b/entc/integration/ent/groupinfo_update.go @@ -0,0 +1,557 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// GroupInfoUpdate is the builder for updating GroupInfo entities. +type GroupInfoUpdate struct { + config + desc *string + max_users *int + groups map[string]struct{} + removedGroups map[string]struct{} + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (giu *GroupInfoUpdate) Where(ps ...ent.Predicate) *GroupInfoUpdate { + giu.predicates = append(giu.predicates, ps...) + return giu +} + +// SetDesc sets the desc field. +func (giu *GroupInfoUpdate) SetDesc(s string) *GroupInfoUpdate { + giu.desc = &s + return giu +} + +// SetMaxUsers sets the max_users field. +func (giu *GroupInfoUpdate) SetMaxUsers(i int) *GroupInfoUpdate { + giu.max_users = &i + return giu +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. +func (giu *GroupInfoUpdate) SetNillableMaxUsers(i *int) *GroupInfoUpdate { + if i != nil { + giu.SetMaxUsers(*i) + } + return giu +} + +// AddGroupIDs adds the groups edge to Group by ids. +func (giu *GroupInfoUpdate) AddGroupIDs(ids ...string) *GroupInfoUpdate { + if giu.groups == nil { + giu.groups = make(map[string]struct{}) + } + for i := range ids { + giu.groups[ids[i]] = struct{}{} + } + return giu +} + +// AddGroups adds the groups edges to Group. +func (giu *GroupInfoUpdate) AddGroups(g ...*Group) *GroupInfoUpdate { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return giu.AddGroupIDs(ids...) +} + +// RemoveGroupIDs removes the groups edge to Group by ids. 
+func (giu *GroupInfoUpdate) RemoveGroupIDs(ids ...string) *GroupInfoUpdate { + if giu.removedGroups == nil { + giu.removedGroups = make(map[string]struct{}) + } + for i := range ids { + giu.removedGroups[ids[i]] = struct{}{} + } + return giu +} + +// RemoveGroups removes groups edges to Group. +func (giu *GroupInfoUpdate) RemoveGroups(g ...*Group) *GroupInfoUpdate { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return giu.RemoveGroupIDs(ids...) +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (giu *GroupInfoUpdate) Save(ctx context.Context) (int, error) { + switch giu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := giu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (giu *GroupInfoUpdate) SaveX(ctx context.Context) int { + affected, err := giu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (giu *GroupInfoUpdate) Exec(ctx context.Context) error { + _, err := giu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (giu *GroupInfoUpdate) ExecX(ctx context.Context) { + if err := giu.Exec(ctx); err != nil { + panic(err) + } +} + +func (giu *GroupInfoUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(groupinfo.FieldID).From(sql.Table(groupinfo.Table)) + for _, p := range giu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = giu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := giu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(groupinfo.Table).Where(sql.InInts(groupinfo.FieldID, ids...)) + ) + if giu.desc != nil { + update = true + builder.Set(groupinfo.FieldDesc, *giu.desc) + } + if giu.max_users != nil { + update = true + builder.Set(groupinfo.FieldMaxUsers, *giu.max_users) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(giu.removedGroups) > 0 { + eids := make([]int, len(giu.removedGroups)) + for eid := range giu.removedGroups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(groupinfo.GroupsTable). + SetNull(groupinfo.GroupsColumn). + Where(sql.InInts(groupinfo.GroupsColumn, ids...)). + Where(sql.InInts(group.FieldID, eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(giu.groups) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range giu.groups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(group.FieldID, eid) + } + query, args := sql.Update(groupinfo.GroupsTable). + Set(groupinfo.GroupsColumn, id). + Where(sql.And(p, sql.IsNull(groupinfo.GroupsColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(giu.groups) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"groups\" %v already connected to a different \"GroupInfo\"", keys(giu.groups))}) + } + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (giu *GroupInfoUpdate) gremlinSave(ctx context.Context) ([]*GroupInfo, error) { + res := &gremlin.Response{} + query, bindings := giu.gremlin().Query() + if err := giu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var gis GroupInfos + gis.config(giu.config) + if err := gis.FromResponse(res); err != nil { + return nil, err + } + return gis, nil +} + +func (giu *GroupInfoUpdate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.V().HasLabel(groupinfo.Label) + for _, p := range giu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if giu.desc != nil { + v.Property(dsl.Single, groupinfo.FieldDesc, *giu.desc) + } + if giu.max_users != nil { + v.Property(dsl.Single, groupinfo.FieldMaxUsers, *giu.max_users) + } + for id := range giu.removedGroups { + tr := rv.Clone().InE(group.InfoLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range giu.groups { + v.AddE(group.InfoLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.InfoLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(groupinfo.Label, group.InfoLabel, id)), + }) + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// GroupInfoUpdateOne is the builder for updating a single GroupInfo entity. +type GroupInfoUpdateOne struct { + config + id string + desc *string + max_users *int + groups map[string]struct{} + removedGroups map[string]struct{} +} + +// SetDesc sets the desc field. +func (giuo *GroupInfoUpdateOne) SetDesc(s string) *GroupInfoUpdateOne { + giuo.desc = &s + return giuo +} + +// SetMaxUsers sets the max_users field. +func (giuo *GroupInfoUpdateOne) SetMaxUsers(i int) *GroupInfoUpdateOne { + giuo.max_users = &i + return giuo +} + +// SetNillableMaxUsers sets the max_users field if the given value is not nil. 
+func (giuo *GroupInfoUpdateOne) SetNillableMaxUsers(i *int) *GroupInfoUpdateOne { + if i != nil { + giuo.SetMaxUsers(*i) + } + return giuo +} + +// AddGroupIDs adds the groups edge to Group by ids. +func (giuo *GroupInfoUpdateOne) AddGroupIDs(ids ...string) *GroupInfoUpdateOne { + if giuo.groups == nil { + giuo.groups = make(map[string]struct{}) + } + for i := range ids { + giuo.groups[ids[i]] = struct{}{} + } + return giuo +} + +// AddGroups adds the groups edges to Group. +func (giuo *GroupInfoUpdateOne) AddGroups(g ...*Group) *GroupInfoUpdateOne { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return giuo.AddGroupIDs(ids...) +} + +// RemoveGroupIDs removes the groups edge to Group by ids. +func (giuo *GroupInfoUpdateOne) RemoveGroupIDs(ids ...string) *GroupInfoUpdateOne { + if giuo.removedGroups == nil { + giuo.removedGroups = make(map[string]struct{}) + } + for i := range ids { + giuo.removedGroups[ids[i]] = struct{}{} + } + return giuo +} + +// RemoveGroups removes groups edges to Group. +func (giuo *GroupInfoUpdateOne) RemoveGroups(g ...*Group) *GroupInfoUpdateOne { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return giuo.RemoveGroupIDs(ids...) +} + +// Save executes the query and returns the updated entity. +func (giuo *GroupInfoUpdateOne) Save(ctx context.Context) (*GroupInfo, error) { + switch giuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return giuo.sqlSave(ctx) + case dialect.Neptune: + return giuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (giuo *GroupInfoUpdateOne) SaveX(ctx context.Context) *GroupInfo { + gi, err := giuo.Save(ctx) + if err != nil { + panic(err) + } + return gi +} + +// Exec executes the query on the entity. 
+func (giuo *GroupInfoUpdateOne) Exec(ctx context.Context) error { + _, err := giuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (giuo *GroupInfoUpdateOne) ExecX(ctx context.Context) { + if err := giuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (giuo *GroupInfoUpdateOne) sqlSave(ctx context.Context) (gi *GroupInfo, err error) { + selector := sql.Select(groupinfo.Columns...).From(sql.Table(groupinfo.Table)) + groupinfo.ID(giuo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = giuo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + gi = &GroupInfo{config: giuo.config} + if err := gi.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into GroupInfo: %v", err) + } + id = gi.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: GroupInfo not found with id: %v", giuo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one GroupInfo with the same id: %v", giuo.id) + } + + tx, err := giuo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = sql.Update(groupinfo.Table).Where(sql.InInts(groupinfo.FieldID, ids...)) + ) + if giuo.desc != nil { + update = true + builder.Set(groupinfo.FieldDesc, *giuo.desc) + gi.Desc = *giuo.desc + } + if giuo.max_users != nil { + update = true + builder.Set(groupinfo.FieldMaxUsers, *giuo.max_users) + gi.MaxUsers = *giuo.max_users + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(giuo.removedGroups) > 0 { + eids := make([]int, len(giuo.removedGroups)) + for eid := range giuo.removedGroups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := 
sql.Update(groupinfo.GroupsTable). + SetNull(groupinfo.GroupsColumn). + Where(sql.InInts(groupinfo.GroupsColumn, ids...)). + Where(sql.InInts(group.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(giuo.groups) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range giuo.groups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(group.FieldID, eid) + } + query, args := sql.Update(groupinfo.GroupsTable). + Set(groupinfo.GroupsColumn, id). + Where(sql.And(p, sql.IsNull(groupinfo.GroupsColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(giuo.groups) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"groups\" %v already connected to a different \"GroupInfo\"", keys(giuo.groups))}) + } + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return gi, nil +} + +func (giuo *GroupInfoUpdateOne) gremlinSave(ctx context.Context) (*GroupInfo, error) { + res := &gremlin.Response{} + query, bindings := giuo.gremlin(giuo.id).Query() + if err := giuo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + gi := &GroupInfo{config: giuo.config} + if err := gi.FromResponse(res); err != nil { + return nil, err + } + return gi, nil +} + +func (giuo *GroupInfoUpdateOne) gremlin(id string) *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.V(id) + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if giuo.desc != nil { + v.Property(dsl.Single, groupinfo.FieldDesc, *giuo.desc) + } + if giuo.max_users != nil { + v.Property(dsl.Single, groupinfo.FieldMaxUsers, *giuo.max_users) + } + for id := range giuo.removedGroups { + tr := rv.Clone().InE(group.InfoLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range giuo.groups { + v.AddE(group.InfoLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(group.InfoLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(groupinfo.Label, group.InfoLabel, id)), + }) + } + v.ValueMap(true) + if len(constraints) > 0 { + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/migrate/migrate.go b/entc/integration/ent/migrate/migrate.go new file mode 100644 index 000000000..ecac79503 --- /dev/null +++ b/entc/integration/ent/migrate/migrate.go @@ -0,0 +1,317 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package migrate + +import ( + "fbc/ent/dialect/sql/schema" + "fbc/ent/field" +) + +var ( + nullable = true + // CardsColumns holds the columns for the "cards" table. + CardsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "number", Type: field.TypeString}, + {Name: "owner_id", Type: field.TypeInt, Unique: true, Nullable: &nullable}, + } + // CardsTable holds the schema information for the "cards" table. 
+ CardsTable = &schema.Table{ + Name: "cards", + Columns: CardsColumns, + PrimaryKey: []*schema.Column{CardsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "cards_users_card", + Columns: []*schema.Column{CardsColumns[2]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // CommentsColumns holds the columns for the "comments" table. + CommentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + } + // CommentsTable holds the schema information for the "comments" table. + CommentsTable = &schema.Table{ + Name: "comments", + Columns: CommentsColumns, + PrimaryKey: []*schema.Column{CommentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{}, + } + // FilesColumns holds the columns for the "files" table. + FilesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "size", Type: field.TypeInt}, + {Name: "name", Type: field.TypeString}, + {Name: "group_file_id", Type: field.TypeInt, Nullable: &nullable}, + {Name: "user_file_id", Type: field.TypeInt, Nullable: &nullable}, + } + // FilesTable holds the schema information for the "files" table. + FilesTable = &schema.Table{ + Name: "files", + Columns: FilesColumns, + PrimaryKey: []*schema.Column{FilesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "files_groups_files", + Columns: []*schema.Column{FilesColumns[3]}, + + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "files_users_files", + Columns: []*schema.Column{FilesColumns[4]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // GroupsColumns holds the columns for the "groups" table. 
+ GroupsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "active", Type: field.TypeBool}, + {Name: "expire", Type: field.TypeTime}, + {Name: "type", Type: field.TypeString}, + {Name: "max_users", Type: field.TypeInt}, + {Name: "name", Type: field.TypeString}, + {Name: "info_id", Type: field.TypeInt, Nullable: &nullable}, + } + // GroupsTable holds the schema information for the "groups" table. + GroupsTable = &schema.Table{ + Name: "groups", + Columns: GroupsColumns, + PrimaryKey: []*schema.Column{GroupsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "groups_group_infos_info", + Columns: []*schema.Column{GroupsColumns[6]}, + + RefColumns: []*schema.Column{GroupInfosColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // GroupInfosColumns holds the columns for the "group_infos" table. + GroupInfosColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "desc", Type: field.TypeString}, + {Name: "max_users", Type: field.TypeInt}, + } + // GroupInfosTable holds the schema information for the "group_infos" table. + GroupInfosTable = &schema.Table{ + Name: "group_infos", + Columns: GroupInfosColumns, + PrimaryKey: []*schema.Column{GroupInfosColumns[0]}, + ForeignKeys: []*schema.ForeignKey{}, + } + // NodesColumns holds the columns for the "nodes" table. + NodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "value", Type: field.TypeInt}, + {Name: "prev_id", Type: field.TypeInt, Unique: true, Nullable: &nullable}, + } + // NodesTable holds the schema information for the "nodes" table. 
+ NodesTable = &schema.Table{ + Name: "nodes", + Columns: NodesColumns, + PrimaryKey: []*schema.Column{NodesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "nodes_nodes_next", + Columns: []*schema.Column{NodesColumns[2]}, + + RefColumns: []*schema.Column{NodesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // PetsColumns holds the columns for the "pets" table. + PetsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString}, + {Name: "owner_id", Type: field.TypeInt, Nullable: &nullable}, + {Name: "team_id", Type: field.TypeInt, Unique: true, Nullable: &nullable}, + } + // PetsTable holds the schema information for the "pets" table. + PetsTable = &schema.Table{ + Name: "pets", + Columns: PetsColumns, + PrimaryKey: []*schema.Column{PetsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "pets_users_pets", + Columns: []*schema.Column{PetsColumns[2]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "pets_users_team", + Columns: []*schema.Column{PetsColumns[3]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // UsersColumns holds the columns for the "users" table. + UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "age", Type: field.TypeInt}, + {Name: "name", Type: field.TypeString}, + {Name: "last", Type: field.TypeString}, + {Name: "nickname", Type: field.TypeString, Unique: true}, + {Name: "phone", Type: field.TypeString, Unique: true}, + {Name: "group_blocked_id", Type: field.TypeInt, Nullable: &nullable}, + {Name: "user_spouse_id", Type: field.TypeInt, Unique: true, Nullable: &nullable}, + {Name: "parent_id", Type: field.TypeInt, Nullable: &nullable}, + } + // UsersTable holds the schema information for the "users" table. 
+ UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "users_groups_blocked", + Columns: []*schema.Column{UsersColumns[6]}, + + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "users_users_spouse", + Columns: []*schema.Column{UsersColumns[7]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "users_users_parent", + Columns: []*schema.Column{UsersColumns[8]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // UserGroupsColumns holds the columns for the "user_groups" table. + UserGroupsColumns = []*schema.Column{ + {Name: "user_id", Type: field.TypeInt}, + {Name: "group_id", Type: field.TypeInt}, + } + // UserGroupsTable holds the schema information for the "user_groups" table. + UserGroupsTable = &schema.Table{ + Name: "user_groups", + Columns: UserGroupsColumns, + PrimaryKey: []*schema.Column{UserGroupsColumns[0], UserGroupsColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_groups_user_id", + Columns: []*schema.Column{UserGroupsColumns[0]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "user_groups_group_id", + Columns: []*schema.Column{UserGroupsColumns[1]}, + + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // UserFriendsColumns holds the columns for the "user_friends" table. + UserFriendsColumns = []*schema.Column{ + {Name: "user_id", Type: field.TypeInt}, + {Name: "friend_id", Type: field.TypeInt}, + } + // UserFriendsTable holds the schema information for the "user_friends" table. 
+ UserFriendsTable = &schema.Table{ + Name: "user_friends", + Columns: UserFriendsColumns, + PrimaryKey: []*schema.Column{UserFriendsColumns[0], UserFriendsColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_friends_user_id", + Columns: []*schema.Column{UserFriendsColumns[0]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "user_friends_friend_id", + Columns: []*schema.Column{UserFriendsColumns[1]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // UserFollowingColumns holds the columns for the "user_following" table. + UserFollowingColumns = []*schema.Column{ + {Name: "user_id", Type: field.TypeInt}, + {Name: "follower_id", Type: field.TypeInt}, + } + // UserFollowingTable holds the schema information for the "user_following" table. + UserFollowingTable = &schema.Table{ + Name: "user_following", + Columns: UserFollowingColumns, + PrimaryKey: []*schema.Column{UserFollowingColumns[0], UserFollowingColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_following_user_id", + Columns: []*schema.Column{UserFollowingColumns[0]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "user_following_follower_id", + Columns: []*schema.Column{UserFollowingColumns[1]}, + + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // Tables holds all the tables in the schema. 
+ Tables = []*schema.Table{ + CardsTable, + CommentsTable, + FilesTable, + GroupsTable, + GroupInfosTable, + NodesTable, + PetsTable, + UsersTable, + UserGroupsTable, + UserFriendsTable, + UserFollowingTable, + } +) + +func init() { + CardsTable.ForeignKeys[0].RefTable = UsersTable + FilesTable.ForeignKeys[0].RefTable = GroupsTable + FilesTable.ForeignKeys[1].RefTable = UsersTable + GroupsTable.ForeignKeys[0].RefTable = GroupInfosTable + NodesTable.ForeignKeys[0].RefTable = NodesTable + PetsTable.ForeignKeys[0].RefTable = UsersTable + PetsTable.ForeignKeys[1].RefTable = UsersTable + UsersTable.ForeignKeys[0].RefTable = GroupsTable + UsersTable.ForeignKeys[1].RefTable = UsersTable + UsersTable.ForeignKeys[2].RefTable = UsersTable + UserGroupsTable.ForeignKeys[0].RefTable = UsersTable + UserGroupsTable.ForeignKeys[1].RefTable = GroupsTable + UserFriendsTable.ForeignKeys[0].RefTable = UsersTable + UserFriendsTable.ForeignKeys[1].RefTable = UsersTable + UserFollowingTable.ForeignKeys[0].RefTable = UsersTable + UserFollowingTable.ForeignKeys[1].RefTable = UsersTable +} diff --git a/entc/integration/ent/migrate/schema.go b/entc/integration/ent/migrate/schema.go new file mode 100644 index 000000000..455fce24a --- /dev/null +++ b/entc/integration/ent/migrate/schema.go @@ -0,0 +1,41 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql/schema" +) + +// SQLDialect wraps the dialect.Driver with additional migration methods. +type SQLDriver interface { + Create(context.Context, ...*schema.Table) error +} + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv SQLDriver +} + +// NewSchema creates a new schema client. 
+func NewSchema(drv dialect.Driver) *Schema { + s := &Schema{} + switch drv.Dialect() { + case dialect.MySQL: + s.drv = &schema.MySQL{Driver: drv} + case dialect.SQLite: + s.drv = &schema.SQLite{Driver: drv} + } + return s +} + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context) error { + if s.drv == nil { + return fmt.Errorf("ent/migrate: dialect does not support migration") + } + return s.drv.Create(ctx, Tables...) +} diff --git a/entc/integration/ent/node.go b/entc/integration/ent/node.go new file mode 100644 index 000000000..bdedc29e9 --- /dev/null +++ b/entc/integration/ent/node.go @@ -0,0 +1,145 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Node is the model entity for the Node schema. +type Node struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Value holds the value of the "value" field. + Value int `json:"value,omitempty"` +} + +// FromResponse scans the gremlin response data into Node. +func (n *Node) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vn struct { + ID string `json:"id,omitempty"` + Value int `json:"value,omitempty"` + } + if err := vmap.Decode(&vn); err != nil { + return err + } + n.ID = vn.ID + n.Value = vn.Value + return nil +} + +// FromRows scans the sql response data into Node. +func (n *Node) FromRows(rows *sql.Rows) error { + var vn struct { + ID int + Value sql.NullInt64 + } + // the order here should be the same as in the `node.Columns`. + if err := rows.Scan( + &vn.ID, + &vn.Value, + ); err != nil { + return err + } + n.ID = strconv.Itoa(vn.ID) + n.Value = int(vn.Value.Int64) + return nil +} + +// QueryPrev queries the prev edge of the Node. 
+func (n *Node) QueryPrev() *NodeQuery { + return (&NodeClient{n.config}).QueryPrev(n) +} + +// QueryNext queries the next edge of the Node. +func (n *Node) QueryNext() *NodeQuery { + return (&NodeClient{n.config}).QueryNext(n) +} + +// Update returns a builder for updating this Node. +// Note that, you need to call Node.Unwrap() before calling this method, if this Node +// was returned from a transaction, and the transaction was committed or rolled back. +func (n *Node) Update() *NodeUpdateOne { + return (&NodeClient{n.config}).UpdateOne(n) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (n *Node) Unwrap() *Node { + tx, ok := n.config.driver.(*txDriver) + if !ok { + panic("ent: Node is not a transactional entity") + } + n.config.driver = tx.drv + return n +} + +// String implements the fmt.Stringer. +func (n *Node) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Node(") + buf.WriteString(fmt.Sprintf("id=%v,", n.ID)) + buf.WriteString(fmt.Sprintf("value=%v", n.Value)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (n *Node) id() int { + id, _ := strconv.Atoi(n.ID) + return id +} + +// Nodes is a parsable slice of Node. +type Nodes []*Node + +// FromResponse scans the gremlin response data into Nodes. +func (n *Nodes) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vn []struct { + ID string `json:"id,omitempty"` + Value int `json:"value,omitempty"` + } + if err := vmap.Decode(&vn); err != nil { + return err + } + for _, v := range vn { + *n = append(*n, &Node{ + ID: v.ID, + Value: v.Value, + }) + } + return nil +} + +// FromRows scans the sql response data into Nodes. 
+func (n *Nodes) FromRows(rows *sql.Rows) error { + for rows.Next() { + vn := &Node{} + if err := vn.FromRows(rows); err != nil { + return err + } + *n = append(*n, vn) + } + return nil +} + +func (n Nodes) config(cfg config) { + for i := range n { + n[i].config = cfg + } +} diff --git a/entc/integration/ent/node/node.go b/entc/integration/ent/node/node.go new file mode 100644 index 000000000..e5679462b --- /dev/null +++ b/entc/integration/ent/node/node.go @@ -0,0 +1,32 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package node + +const ( + // Label holds the string label denoting the node type in the database. + Label = "node" + // PrevInverseLabel holds the string label denoting the prev inverse edge type in the database. + PrevInverseLabel = "node_next" + // NextLabel holds the string label denoting the next edge type in the database. + NextLabel = "node_next" + // FieldValue holds the string denoting the value vertex property in the database. + FieldValue = "value" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the node in the database. + Table = "nodes" + // PrevTable is the table the holds the prev relation/edge. + PrevTable = "nodes" + // PrevColumn is the table column denoting the prev relation/edge. + PrevColumn = "prev_id" + // NextTable is the table the holds the next relation/edge. + NextTable = "nodes" + // NextColumn is the table column denoting the next relation/edge. + NextColumn = "prev_id" +) + +// Columns holds all SQL columns are node fields. +var Columns = []string{ + FieldID, + FieldValue, +} diff --git a/entc/integration/ent/node/where.go b/entc/integration/ent/node/where.go new file mode 100644 index 000000000..8d9ce6c03 --- /dev/null +++ b/entc/integration/ent/node/where.go @@ -0,0 +1,360 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package node + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.EQ(v)) + }, + } +} + +// ValueEQ applies the EQ predicate on the "value" field. 
+func ValueEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.EQ(v)) + }, + } +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.NEQ(v)) + }, + } +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.GT(v)) + }, + } +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.GTE(v)) + }, + } +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.LT(v)) + }, + } +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldValue), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.LTE(v)) + }, + } +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". 
This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldValue), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.Within(v...)) + }, + } +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldValue), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldValue, p.Without(v...)) + }, + } +} + +// HasPrev applies the HasEdge predicate on the "prev" edge. +func HasPrev() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where(sql.NotNull(t1.C(PrevColumn))) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(PrevInverseLabel).InV() + }, + } +} + +// HasPrevWith applies the HasEdge predicate on the "prev" edge with a given conditions (other predicates). +func HasPrevWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FieldID).From(sql.Table(PrevTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(PrevColumn), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(PrevInverseLabel).Where(tr).InV() + }, + } +} + +// HasNext applies the HasEdge predicate on the "next" edge. +func HasNext() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where( + sql.In( + t1.C(FieldID), + sql.Select(NextColumn). + From(sql.Table(NextTable)). 
+ Where(sql.NotNull(NextColumn)), + ), + ) + }, + Gremlin: func(t *dsl.Traversal) { + t.OutE(NextLabel).OutV() + }, + } +} + +// HasNextWith applies the HasEdge predicate on the "next" edge with a given conditions (other predicates). +func HasNextWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(NextColumn).From(sql.Table(NextTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(FieldID), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.InV() + for _, p := range preds { + p.Gremlin(tr) + } + t.OutE(NextLabel).Where(tr).OutV() + }, + } +} diff --git a/entc/integration/ent/node_create.go b/entc/integration/ent/node_create.go new file mode 100644 index 000000000..4e79c1579 --- /dev/null +++ b/entc/integration/ent/node_create.go @@ -0,0 +1,233 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/node" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// NodeCreate is the builder for creating a Node entity. +type NodeCreate struct { + config + value *int + prev map[string]struct{} + next map[string]struct{} +} + +// SetValue sets the value field. +func (nc *NodeCreate) SetValue(i int) *NodeCreate { + nc.value = &i + return nc +} + +// SetNillableValue sets the value field if the given value is not nil. +func (nc *NodeCreate) SetNillableValue(i *int) *NodeCreate { + if i != nil { + nc.SetValue(*i) + } + return nc +} + +// SetPrevID sets the prev edge to Node by id. 
+func (nc *NodeCreate) SetPrevID(id string) *NodeCreate { + if nc.prev == nil { + nc.prev = make(map[string]struct{}) + } + nc.prev[id] = struct{}{} + return nc +} + +// SetNillablePrevID sets the prev edge to Node by id if the given value is not nil. +func (nc *NodeCreate) SetNillablePrevID(id *string) *NodeCreate { + if id != nil { + nc = nc.SetPrevID(*id) + } + return nc +} + +// SetPrev sets the prev edge to Node. +func (nc *NodeCreate) SetPrev(n *Node) *NodeCreate { + return nc.SetPrevID(n.ID) +} + +// SetNextID sets the next edge to Node by id. +func (nc *NodeCreate) SetNextID(id string) *NodeCreate { + if nc.next == nil { + nc.next = make(map[string]struct{}) + } + nc.next[id] = struct{}{} + return nc +} + +// SetNillableNextID sets the next edge to Node by id if the given value is not nil. +func (nc *NodeCreate) SetNillableNextID(id *string) *NodeCreate { + if id != nil { + nc = nc.SetNextID(*id) + } + return nc +} + +// SetNext sets the next edge to Node. +func (nc *NodeCreate) SetNext(n *Node) *NodeCreate { + return nc.SetNextID(n.ID) +} + +// Save creates the Node in the database. +func (nc *NodeCreate) Save(ctx context.Context) (*Node, error) { + if len(nc.prev) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"prev\"") + } + if len(nc.next) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"next\"") + } + switch nc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nc.sqlSave(ctx) + case dialect.Neptune: + return nc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. 
+func (nc *NodeCreate) SaveX(ctx context.Context) *Node { + v, err := nc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (nc *NodeCreate) sqlSave(ctx context.Context) (*Node, error) { + var ( + res sql.Result + n = &Node{config: nc.config} + ) + tx, err := nc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(node.Table).Default(nc.driver.Dialect()) + if nc.value != nil { + builder.Set(node.FieldValue, *nc.value) + n.Value = *nc.value + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + n.ID = strconv.FormatInt(id, 10) + if len(nc.prev) > 0 { + eid, err := strconv.Atoi(keys(nc.prev)[0]) + if err != nil { + return nil, err + } + query, args := sql.Update(node.PrevTable). + Set(node.PrevColumn, eid). + Where(sql.EQ(node.FieldID, id).And().IsNull(node.PrevColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(nc.prev) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"prev\" %v already connected to a different \"Node\"", keys(nc.prev))}) + } + } + if len(nc.next) > 0 { + eid, err := strconv.Atoi(keys(nc.next)[0]) + if err != nil { + return nil, err + } + query, args := sql.Update(node.NextTable). + Set(node.NextColumn, id). + Where(sql.EQ(node.FieldID, eid).And().IsNull(node.NextColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(nc.next) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"next\" %v already connected to a different \"Node\"", keys(nc.next))}) + } + } + if err := tx.Commit(); err != nil { + return nil, err + } + return n, nil +} + +func (nc *NodeCreate) gremlinSave(ctx context.Context) (*Node, error) { + res := &gremlin.Response{} + query, bindings := nc.gremlin().Query() + if err := nc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + n := &Node{config: nc.config} + if err := n.FromResponse(res); err != nil { + return nil, err + } + return n, nil +} + +func (nc *NodeCreate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 2) + v := g.AddV(node.Label) + if nc.value != nil { + v.Property(dsl.Single, node.FieldValue, *nc.value) + } + for id := range nc.prev { + v.AddE(node.NextLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + for id := range nc.next { + v.AddE(node.NextLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr +} diff --git a/entc/integration/ent/node_delete.go b/entc/integration/ent/node_delete.go new file mode 100644 index 000000000..24ee2d457 --- /dev/null +++ b/entc/integration/ent/node_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/node" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// NodeDelete is the builder for deleting a Node entity. +type NodeDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (nd *NodeDelete) Where(ps ...ent.Predicate) *NodeDelete { + nd.predicates = append(nd.predicates, ps...) + return nd +} + +// Exec executes the deletion query. 
+func (nd *NodeDelete) Exec(ctx context.Context) error { + switch nd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nd.sqlExec(ctx) + case dialect.Neptune: + return nd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (nd *NodeDelete) ExecX(ctx context.Context) { + if err := nd.Exec(ctx); err != nil { + panic(err) + } +} + +func (nd *NodeDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(node.Table)) + for _, p := range nd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(node.Table).FromSelect(selector).Query() + return nd.driver.Exec(ctx, query, args, &res) +} + +func (nd *NodeDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := nd.gremlin().Query() + return nd.driver.Exec(ctx, query, bindings, res) +} + +func (nd *NodeDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(node.Label) + for _, p := range nd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// NodeDeleteOne is the builder for deleting a single Node entity. +type NodeDeleteOne struct { + nd *NodeDelete +} + +// Exec executes the deletion query. +func (ndo *NodeDeleteOne) Exec(ctx context.Context) error { + return ndo.nd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ndo *NodeDeleteOne) ExecX(ctx context.Context) { + ndo.nd.ExecX(ctx) +} diff --git a/entc/integration/ent/node_query.go b/entc/integration/ent/node_query.go new file mode 100644 index 000000000..6f2ea5735 --- /dev/null +++ b/entc/integration/ent/node_query.go @@ -0,0 +1,636 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/node" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// NodeQuery is the builder for querying Node entities. +type NodeQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (nq *NodeQuery) Where(ps ...ent.Predicate) *NodeQuery { + nq.predicates = append(nq.predicates, ps...) + return nq +} + +// Limit adds a limit step to the query. +func (nq *NodeQuery) Limit(limit int) *NodeQuery { + nq.limit = &limit + return nq +} + +// Order adds an order step to the query. +func (nq *NodeQuery) Order(o ...Order) *NodeQuery { + nq.order = append(nq.order, o...) + return nq +} + +// QueryPrev chains the current query on the prev edge. +func (nq *NodeQuery) QueryPrev() *NodeQuery { + query := &NodeQuery{config: nq.config} + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(node.Table) + t2 := nq.sqlQuery() + t2.Select(t2.C(node.PrevColumn)) + query.sql = sql.Select(t1.Columns(node.Columns...)...). + From(t1). + Join(t2). + On(t1.C(node.FieldID), t2.C(node.PrevColumn)) + case dialect.Neptune: + gremlin := nq.gremlinQuery() + query.gremlin = gremlin.InE(node.NextLabel).OutV() + } + return query +} + +// QueryNext chains the current query on the next edge. +func (nq *NodeQuery) QueryNext() *NodeQuery { + query := &NodeQuery{config: nq.config} + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(node.Table) + t2 := nq.sqlQuery() + t2.Select(t2.C(node.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). 
+ On(t1.C(node.NextColumn), t2.C(node.FieldID)) + case dialect.Neptune: + gremlin := nq.gremlinQuery() + query.gremlin = gremlin.OutE(node.NextLabel).InV() + } + return query +} + +// Get returns a Node entity by its id. +func (nq *NodeQuery) Get(ctx context.Context, id string) (*Node, error) { + return nq.Where(node.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (nq *NodeQuery) GetX(ctx context.Context, id string) *Node { + n, err := nq.Get(ctx, id) + if err != nil { + panic(err) + } + return n +} + +// First returns the first Node entity in the query. Returns *ErrNotFound when no node was found. +func (nq *NodeQuery) First(ctx context.Context) (*Node, error) { + ns, err := nq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(ns) == 0 { + return nil, &ErrNotFound{node.Label} + } + return ns[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (nq *NodeQuery) FirstX(ctx context.Context) *Node { + n, err := nq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return n +} + +// FirstID returns the first Node id in the query. Returns *ErrNotFound when no id was found. +func (nq *NodeQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = nq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{node.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (nq *NodeQuery) FirstXID(ctx context.Context) string { + id, err := nq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Node entity in the query, returns an error if not exactly one entity was returned. 
+func (nq *NodeQuery) Only(ctx context.Context) (*Node, error) { + ns, err := nq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(ns) { + case 1: + return ns[0], nil + case 0: + return nil, &ErrNotFound{node.Label} + default: + return nil, &ErrNotSingular{node.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (nq *NodeQuery) OnlyX(ctx context.Context) *Node { + n, err := nq.Only(ctx) + if err != nil { + panic(err) + } + return n +} + +// OnlyID returns the only Node id in the query, returns an error if not exactly one id was returned. +func (nq *NodeQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = nq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{node.Label} + default: + err = &ErrNotSingular{node.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (nq *NodeQuery) OnlyXID(ctx context.Context) string { + id, err := nq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Nodes. +func (nq *NodeQuery) All(ctx context.Context) ([]*Node, error) { + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nq.sqlAll(ctx) + case dialect.Neptune: + return nq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (nq *NodeQuery) AllX(ctx context.Context) []*Node { + ns, err := nq.All(ctx) + if err != nil { + panic(err) + } + return ns +} + +// IDs executes the query and returns a list of Node ids. 
+func (nq *NodeQuery) IDs(ctx context.Context) ([]string, error) { + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nq.sqlIDs(ctx) + case dialect.Neptune: + return nq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (nq *NodeQuery) IDsX(ctx context.Context) []string { + ids, err := nq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (nq *NodeQuery) Count(ctx context.Context) (int, error) { + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nq.sqlCount(ctx) + case dialect.Neptune: + return nq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (nq *NodeQuery) CountX(ctx context.Context) int { + count, err := nq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (nq *NodeQuery) Exist(ctx context.Context) (bool, error) { + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nq.sqlExist(ctx) + case dialect.Neptune: + return nq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (nq *NodeQuery) ExistX(ctx context.Context) bool { + exist, err := nq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Value int `json:"value,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Node.Query(). +// GroupBy(node.FieldValue). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +// +func (nq *NodeQuery) GroupBy(field string, fields ...string) *NodeGroupBy { + group := &NodeGroupBy{config: nq.config} + group.fields = append([]string{field}, fields...) + switch nq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = nq.sqlQuery() + case dialect.Neptune: + group.gremlin = nq.gremlinQuery() + } + return group +} + +func (nq *NodeQuery) sqlAll(ctx context.Context) ([]*Node, error) { + rows := &sql.Rows{} + selector := nq.sqlQuery() + if unique := nq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := nq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ns Nodes + if err := ns.FromRows(rows); err != nil { + return nil, err + } + ns.config(nq.config) + return ns, nil +} + +func (nq *NodeQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := nq.sqlQuery() + unique := []string{node.FieldID} + if len(nq.unique) > 0 { + unique = nq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := nq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (nq *NodeQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := nq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (nq *NodeQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := nq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (nq *NodeQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(node.Table) + selector := 
sql.Select(t1.Columns(node.Columns...)...).From(t1) + if nq.sql != nil { + selector = nq.sql + selector.Select(selector.Columns(node.Columns...)...) + } + for _, p := range nq.predicates { + p.SQL(selector) + } + for _, p := range nq.order { + p.SQL(selector) + } + if limit := nq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (nq *NodeQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := nq.gremlinQuery().Query() + if err := nq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (nq *NodeQuery) gremlinAll(ctx context.Context) ([]*Node, error) { + res := &gremlin.Response{} + query, bindings := nq.gremlinQuery().ValueMap(true).Query() + if err := nq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var ns Nodes + if err := ns.FromResponse(res); err != nil { + return nil, err + } + ns.config(nq.config) + return ns, nil +} + +func (nq *NodeQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := nq.gremlinQuery().Count().Query() + if err := nq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (nq *NodeQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := nq.gremlinQuery().HasNext().Query() + if err := nq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (nq *NodeQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(node.Label) + if nq.gremlin != nil { + v = nq.gremlin.Clone() + } + for _, p := range nq.predicates { + p.Gremlin(v) + } + if len(nq.order) > 0 { + v.Order() + for _, p := 
range nq.order { + p.Gremlin(v) + } + } + if limit := nq.limit; limit != nil { + v.Limit(*limit) + } + if unique := nq.unique; len(unique) == 0 { + v.Dedup() + } + return v +} + +// NodeQuery is the builder for group-by Node entities. +type NodeGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ngb *NodeGroupBy) Aggregate(fns ...Aggregate) *NodeGroupBy { + ngb.fns = append(ngb.fns, fns...) + return ngb +} + +// Scan applies the group-by query and scan the result into the given value. +func (ngb *NodeGroupBy) Scan(ctx context.Context, v interface{}) error { + switch ngb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return ngb.sqlScan(ctx, v) + case dialect.Neptune: + return ngb.gremlinScan(ctx, v) + default: + return errors.New("ngb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (ngb *NodeGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := ngb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (ngb *NodeGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(ngb.fields) > 1 { + return nil, errors.New("ent: NodeGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := ngb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ngb *NodeGroupBy) StringsX(ctx context.Context) []string { + v, err := ngb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (ngb *NodeGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(ngb.fields) > 1 { + return nil, errors.New("ent: NodeGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := ngb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (ngb *NodeGroupBy) IntsX(ctx context.Context) []int { + v, err := ngb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (ngb *NodeGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(ngb.fields) > 1 { + return nil, errors.New("ent: NodeGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := ngb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ngb *NodeGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := ngb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (ngb *NodeGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(ngb.fields) > 1 { + return nil, errors.New("ent: NodeGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := ngb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (ngb *NodeGroupBy) BoolsX(ctx context.Context) []bool { + v, err := ngb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (ngb *NodeGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := ngb.sqlQuery().Query() + if err := ngb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (ngb *NodeGroupBy) sqlQuery() *sql.Selector { + selector := ngb.sql + columns := make([]string, 0, len(ngb.fields)+len(ngb.fns)) + columns = append(columns, ngb.fields...) + for _, fn := range ngb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(ngb.fields...) +} + +func (ngb *NodeGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := ngb.gremlinQuery().Query() + if err := ngb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(ngb.fields)+len(ngb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (ngb *NodeGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range ngb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range ngb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return ngb.gremlin.Group(). + By(__.Values(ngb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/node_update.go b/entc/integration/ent/node_update.go new file mode 100644 index 000000000..8ecd2326e --- /dev/null +++ b/entc/integration/ent/node_update.go @@ -0,0 +1,629 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/node" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// NodeUpdate is the builder for updating Node entities. +type NodeUpdate struct { + config + value *int + prev map[string]struct{} + next map[string]struct{} + clearedPrev bool + clearedNext bool + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (nu *NodeUpdate) Where(ps ...ent.Predicate) *NodeUpdate { + nu.predicates = append(nu.predicates, ps...) + return nu +} + +// SetValue sets the value field. +func (nu *NodeUpdate) SetValue(i int) *NodeUpdate { + nu.value = &i + return nu +} + +// SetNillableValue sets the value field if the given value is not nil. +func (nu *NodeUpdate) SetNillableValue(i *int) *NodeUpdate { + if i != nil { + nu.SetValue(*i) + } + return nu +} + +// SetPrevID sets the prev edge to Node by id. +func (nu *NodeUpdate) SetPrevID(id string) *NodeUpdate { + if nu.prev == nil { + nu.prev = make(map[string]struct{}) + } + nu.prev[id] = struct{}{} + return nu +} + +// SetNillablePrevID sets the prev edge to Node by id if the given value is not nil. +func (nu *NodeUpdate) SetNillablePrevID(id *string) *NodeUpdate { + if id != nil { + nu = nu.SetPrevID(*id) + } + return nu +} + +// SetPrev sets the prev edge to Node. +func (nu *NodeUpdate) SetPrev(n *Node) *NodeUpdate { + return nu.SetPrevID(n.ID) +} + +// SetNextID sets the next edge to Node by id. +func (nu *NodeUpdate) SetNextID(id string) *NodeUpdate { + if nu.next == nil { + nu.next = make(map[string]struct{}) + } + nu.next[id] = struct{}{} + return nu +} + +// SetNillableNextID sets the next edge to Node by id if the given value is not nil. 
+func (nu *NodeUpdate) SetNillableNextID(id *string) *NodeUpdate { + if id != nil { + nu = nu.SetNextID(*id) + } + return nu +} + +// SetNext sets the next edge to Node. +func (nu *NodeUpdate) SetNext(n *Node) *NodeUpdate { + return nu.SetNextID(n.ID) +} + +// ClearPrev clears the prev edge to Node. +func (nu *NodeUpdate) ClearPrev() *NodeUpdate { + nu.clearedPrev = true + return nu +} + +// ClearNext clears the next edge to Node. +func (nu *NodeUpdate) ClearNext() *NodeUpdate { + nu.clearedNext = true + return nu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (nu *NodeUpdate) Save(ctx context.Context) (int, error) { + if len(nu.prev) > 1 { + return 0, errors.New("ent: multiple assignments on a unique edge \"prev\"") + } + if len(nu.next) > 1 { + return 0, errors.New("ent: multiple assignments on a unique edge \"next\"") + } + switch nu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := nu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (nu *NodeUpdate) SaveX(ctx context.Context) int { + affected, err := nu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (nu *NodeUpdate) Exec(ctx context.Context) error { + _, err := nu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (nu *NodeUpdate) ExecX(ctx context.Context) { + if err := nu.Exec(ctx); err != nil { + panic(err) + } +} + +func (nu *NodeUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(node.FieldID).From(sql.Table(node.Table)) + for _, p := range nu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = nu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := nu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(node.Table).Where(sql.InInts(node.FieldID, ids...)) + ) + if nu.value != nil { + update = true + builder.Set(node.FieldValue, *nu.value) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if nu.clearedPrev { + query, args := sql.Update(node.PrevTable). + SetNull(node.PrevColumn). + Where(sql.InInts(node.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(nu.prev) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(nu.prev)[0]) + if serr != nil { + return 0, err + } + query, args := sql.Update(node.PrevTable). + Set(node.PrevColumn, eid). + Where(sql.EQ(node.FieldID, id).And().IsNull(node.PrevColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(nu.prev) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"prev\" %v already connected to a different \"Node\"", keys(nu.prev))}) + } + } + } + if nu.clearedNext { + query, args := sql.Update(node.NextTable). + SetNull(node.NextColumn). + Where(sql.InInts(node.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(nu.next) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(nu.next)[0]) + if serr != nil { + return 0, err + } + query, args := sql.Update(node.NextTable). + Set(node.NextColumn, id). + Where(sql.EQ(node.FieldID, eid).And().IsNull(node.NextColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(nu.next) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"next\" %v already connected to a different \"Node\"", keys(nu.next))}) + } + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (nu *NodeUpdate) gremlinSave(ctx context.Context) ([]*Node, error) { + res := &gremlin.Response{} + query, bindings := nu.gremlin().Query() + if err := nu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var ns Nodes + ns.config(nu.config) + if err := ns.FromResponse(res); err != nil { + return nil, err + } + return ns, nil +} + +func (nu *NodeUpdate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 2) + v := g.V().HasLabel(node.Label) + for _, p := range nu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if nu.value != nil { + v.Property(dsl.Single, node.FieldValue, *nu.value) + } + if nu.clearedPrev { + tr := rv.Clone().InE(node.NextLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range nu.prev { + v.AddE(node.NextLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + if nu.clearedNext { + tr := rv.Clone().OutE(node.NextLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range nu.next { + v.AddE(node.NextLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// NodeUpdateOne is the builder for updating a single Node entity. +type NodeUpdateOne struct { + config + id string + value *int + prev map[string]struct{} + next map[string]struct{} + clearedPrev bool + clearedNext bool +} + +// SetValue sets the value field. +func (nuo *NodeUpdateOne) SetValue(i int) *NodeUpdateOne { + nuo.value = &i + return nuo +} + +// SetNillableValue sets the value field if the given value is not nil. 
+func (nuo *NodeUpdateOne) SetNillableValue(i *int) *NodeUpdateOne { + if i != nil { + nuo.SetValue(*i) + } + return nuo +} + +// SetPrevID sets the prev edge to Node by id. +func (nuo *NodeUpdateOne) SetPrevID(id string) *NodeUpdateOne { + if nuo.prev == nil { + nuo.prev = make(map[string]struct{}) + } + nuo.prev[id] = struct{}{} + return nuo +} + +// SetNillablePrevID sets the prev edge to Node by id if the given value is not nil. +func (nuo *NodeUpdateOne) SetNillablePrevID(id *string) *NodeUpdateOne { + if id != nil { + nuo = nuo.SetPrevID(*id) + } + return nuo +} + +// SetPrev sets the prev edge to Node. +func (nuo *NodeUpdateOne) SetPrev(n *Node) *NodeUpdateOne { + return nuo.SetPrevID(n.ID) +} + +// SetNextID sets the next edge to Node by id. +func (nuo *NodeUpdateOne) SetNextID(id string) *NodeUpdateOne { + if nuo.next == nil { + nuo.next = make(map[string]struct{}) + } + nuo.next[id] = struct{}{} + return nuo +} + +// SetNillableNextID sets the next edge to Node by id if the given value is not nil. +func (nuo *NodeUpdateOne) SetNillableNextID(id *string) *NodeUpdateOne { + if id != nil { + nuo = nuo.SetNextID(*id) + } + return nuo +} + +// SetNext sets the next edge to Node. +func (nuo *NodeUpdateOne) SetNext(n *Node) *NodeUpdateOne { + return nuo.SetNextID(n.ID) +} + +// ClearPrev clears the prev edge to Node. +func (nuo *NodeUpdateOne) ClearPrev() *NodeUpdateOne { + nuo.clearedPrev = true + return nuo +} + +// ClearNext clears the next edge to Node. +func (nuo *NodeUpdateOne) ClearNext() *NodeUpdateOne { + nuo.clearedNext = true + return nuo +} + +// Save executes the query and returns the updated entity. 
+func (nuo *NodeUpdateOne) Save(ctx context.Context) (*Node, error) { + if len(nuo.prev) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"prev\"") + } + if len(nuo.next) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"next\"") + } + switch nuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return nuo.sqlSave(ctx) + case dialect.Neptune: + return nuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (nuo *NodeUpdateOne) SaveX(ctx context.Context) *Node { + n, err := nuo.Save(ctx) + if err != nil { + panic(err) + } + return n +} + +// Exec executes the query on the entity. +func (nuo *NodeUpdateOne) Exec(ctx context.Context) error { + _, err := nuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nuo *NodeUpdateOne) ExecX(ctx context.Context) { + if err := nuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (nuo *NodeUpdateOne) sqlSave(ctx context.Context) (n *Node, err error) { + selector := sql.Select(node.Columns...).From(sql.Table(node.Table)) + node.ID(nuo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = nuo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + n = &Node{config: nuo.config} + if err := n.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into Node: %v", err) + } + id = n.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: Node not found with id: %v", nuo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one Node with the same id: %v", nuo.id) + } + + tx, err := nuo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = 
sql.Update(node.Table).Where(sql.InInts(node.FieldID, ids...)) + ) + if nuo.value != nil { + update = true + builder.Set(node.FieldValue, *nuo.value) + n.Value = *nuo.value + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if nuo.clearedPrev { + query, args := sql.Update(node.PrevTable). + SetNull(node.PrevColumn). + Where(sql.InInts(node.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(nuo.prev) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(nuo.prev)[0]) + if serr != nil { + return nil, err + } + query, args := sql.Update(node.PrevTable). + Set(node.PrevColumn, eid). + Where(sql.EQ(node.FieldID, id).And().IsNull(node.PrevColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(nuo.prev) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"prev\" %v already connected to a different \"Node\"", keys(nuo.prev))}) + } + } + } + if nuo.clearedNext { + query, args := sql.Update(node.NextTable). + SetNull(node.NextColumn). + Where(sql.InInts(node.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(nuo.next) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(nuo.next)[0]) + if serr != nil { + return nil, err + } + query, args := sql.Update(node.NextTable). + Set(node.NextColumn, id). + Where(sql.EQ(node.FieldID, eid).And().IsNull(node.NextColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(nuo.next) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"next\" %v already connected to a different \"Node\"", keys(nuo.next))}) + } + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return n, nil +} + +func (nuo *NodeUpdateOne) gremlinSave(ctx context.Context) (*Node, error) { + res := &gremlin.Response{} + query, bindings := nuo.gremlin(nuo.id).Query() + if err := nuo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + n := &Node{config: nuo.config} + if err := n.FromResponse(res); err != nil { + return nil, err + } + return n, nil +} + +func (nuo *NodeUpdateOne) gremlin(id string) *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 2) + v := g.V(id) + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if nuo.value != nil { + v.Property(dsl.Single, node.FieldValue, *nuo.value) + } + if nuo.clearedPrev { + tr := rv.Clone().InE(node.NextLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range nuo.prev { + v.AddE(node.NextLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + if nuo.clearedNext { + tr := rv.Clone().OutE(node.NextLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range nuo.next { + v.AddE(node.NextLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(node.NextLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(node.Label, node.NextLabel, id)), + }) + } + v.ValueMap(true) + if len(constraints) > 0 { + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/pet.go b/entc/integration/ent/pet.go new file mode 100644 index 000000000..3c83c6410 --- /dev/null +++ b/entc/integration/ent/pet.go @@ -0,0 +1,145 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Pet is the model entity for the Pet schema. +type Pet struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` +} + +// FromResponse scans the gremlin response data into Pet. 
+func (pe *Pet) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vpe struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } + if err := vmap.Decode(&vpe); err != nil { + return err + } + pe.ID = vpe.ID + pe.Name = vpe.Name + return nil +} + +// FromRows scans the sql response data into Pet. +func (pe *Pet) FromRows(rows *sql.Rows) error { + var vpe struct { + ID int + Name string + } + // the order here should be the same as in the `pet.Columns`. + if err := rows.Scan( + &vpe.ID, + &vpe.Name, + ); err != nil { + return err + } + pe.ID = strconv.Itoa(vpe.ID) + pe.Name = vpe.Name + return nil +} + +// QueryTeam queries the team edge of the Pet. +func (pe *Pet) QueryTeam() *UserQuery { + return (&PetClient{pe.config}).QueryTeam(pe) +} + +// QueryOwner queries the owner edge of the Pet. +func (pe *Pet) QueryOwner() *UserQuery { + return (&PetClient{pe.config}).QueryOwner(pe) +} + +// Update returns a builder for updating this Pet. +// Note that, you need to call Pet.Unwrap() before calling this method, if this Pet +// was returned from a transaction, and the transaction was committed or rolled back. +func (pe *Pet) Update() *PetUpdateOne { + return (&PetClient{pe.config}).UpdateOne(pe) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (pe *Pet) Unwrap() *Pet { + tx, ok := pe.config.driver.(*txDriver) + if !ok { + panic("ent: Pet is not a transactional entity") + } + pe.config.driver = tx.drv + return pe +} + +// String implements the fmt.Stringer. 
+func (pe *Pet) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Pet(") + buf.WriteString(fmt.Sprintf("id=%v,", pe.ID)) + buf.WriteString(fmt.Sprintf("name=%v", pe.Name)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (pe *Pet) id() int { + id, _ := strconv.Atoi(pe.ID) + return id +} + +// Pets is a parsable slice of Pet. +type Pets []*Pet + +// FromResponse scans the gremlin response data into Pets. +func (pe *Pets) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vpe []struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } + if err := vmap.Decode(&vpe); err != nil { + return err + } + for _, v := range vpe { + *pe = append(*pe, &Pet{ + ID: v.ID, + Name: v.Name, + }) + } + return nil +} + +// FromRows scans the sql response data into Pets. +func (pe *Pets) FromRows(rows *sql.Rows) error { + for rows.Next() { + vpe := &Pet{} + if err := vpe.FromRows(rows); err != nil { + return err + } + *pe = append(*pe, vpe) + } + return nil +} + +func (pe Pets) config(cfg config) { + for i := range pe { + pe[i].config = cfg + } +} diff --git a/entc/integration/ent/pet/pet.go b/entc/integration/ent/pet/pet.go new file mode 100644 index 000000000..a33574242 --- /dev/null +++ b/entc/integration/ent/pet/pet.go @@ -0,0 +1,38 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package pet + +const ( + // Label holds the string label denoting the pet type in the database. + Label = "pet" + // TeamInverseLabel holds the string label denoting the team inverse edge type in the database. + TeamInverseLabel = "user_team" + // OwnerInverseLabel holds the string label denoting the owner inverse edge type in the database. + OwnerInverseLabel = "user_pets" + // FieldName holds the string denoting the name vertex property in the database. 
+ FieldName = "name" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the pet in the database. + Table = "pets" + // TeamTable is the table the holds the team relation/edge. + TeamTable = "pets" + // TeamInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + TeamInverseTable = "users" + // TeamColumn is the table column denoting the team relation/edge. + TeamColumn = "team_id" + // OwnerTable is the table the holds the owner relation/edge. + OwnerTable = "pets" + // OwnerInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + OwnerInverseTable = "users" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "owner_id" +) + +// Columns holds all SQL columns are pet fields. +var Columns = []string{ + FieldID, + FieldName, +} diff --git a/entc/integration/ent/pet/where.go b/entc/integration/ent/pet/where.go new file mode 100644 index 000000000..f78837946 --- /dev/null +++ b/entc/integration/ent/pet/where.go @@ -0,0 +1,389 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package pet + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.NEQ(v)) + }, + } +} + +// NameGT applies the GT predicate on the "name" field. 
+func NameGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GT(v)) + }, + } +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GTE(v)) + }, + } +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LT(v)) + }, + } +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LTE(v)) + }, + } +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Within(v...)) + }, + } +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". 
This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Without(v...)) + }, + } +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Containing(v)) + }, + } +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.StartingWith(v)) + }, + } +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EndingWith(v)) + }, + } +} + +// HasTeam applies the HasEdge predicate on the "team" edge. +func HasTeam() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where(sql.NotNull(t1.C(TeamColumn))) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(TeamInverseLabel).InV() + }, + } +} + +// HasTeamWith applies the HasEdge predicate on the "team" edge with a given conditions (other predicates). 
+func HasTeamWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FieldID).From(sql.Table(TeamInverseTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(TeamColumn), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(TeamInverseLabel).Where(tr).InV() + }, + } +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + s.Where(sql.NotNull(t1.C(OwnerColumn))) + }, + Gremlin: func(t *dsl.Traversal) { + t.InE(OwnerInverseLabel).InV() + }, + } +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + t1 := s.Table() + t2 := sql.Select(FieldID).From(sql.Table(OwnerInverseTable)) + for _, p := range preds { + p.SQL(t2) + } + s.Where(sql.In(t1.C(OwnerColumn), t2)) + }, + Gremlin: func(t *dsl.Traversal) { + tr := __.OutV() + for _, p := range preds { + p.Gremlin(tr) + } + t.InE(OwnerInverseLabel).Where(tr).InV() + }, + } +} diff --git a/entc/integration/ent/pet_create.go b/entc/integration/ent/pet_create.go new file mode 100644 index 000000000..776f38ab4 --- /dev/null +++ b/entc/integration/ent/pet_create.go @@ -0,0 +1,220 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// PetCreate is the builder for creating a Pet entity. 
+type PetCreate struct { + config + name *string + team map[string]struct{} + owner map[string]struct{} +} + +// SetName sets the name field. +func (pc *PetCreate) SetName(s string) *PetCreate { + pc.name = &s + return pc +} + +// SetTeamID sets the team edge to User by id. +func (pc *PetCreate) SetTeamID(id string) *PetCreate { + if pc.team == nil { + pc.team = make(map[string]struct{}) + } + pc.team[id] = struct{}{} + return pc +} + +// SetNillableTeamID sets the team edge to User by id if the given value is not nil. +func (pc *PetCreate) SetNillableTeamID(id *string) *PetCreate { + if id != nil { + pc = pc.SetTeamID(*id) + } + return pc +} + +// SetTeam sets the team edge to User. +func (pc *PetCreate) SetTeam(u *User) *PetCreate { + return pc.SetTeamID(u.ID) +} + +// SetOwnerID sets the owner edge to User by id. +func (pc *PetCreate) SetOwnerID(id string) *PetCreate { + if pc.owner == nil { + pc.owner = make(map[string]struct{}) + } + pc.owner[id] = struct{}{} + return pc +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (pc *PetCreate) SetNillableOwnerID(id *string) *PetCreate { + if id != nil { + pc = pc.SetOwnerID(*id) + } + return pc +} + +// SetOwner sets the owner edge to User. +func (pc *PetCreate) SetOwner(u *User) *PetCreate { + return pc.SetOwnerID(u.ID) +} + +// Save creates the Pet in the database. 
+func (pc *PetCreate) Save(ctx context.Context) (*Pet, error) { + if pc.name == nil { + return nil, errors.New("ent: missing required field \"name\"") + } + if len(pc.team) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"team\"") + } + if len(pc.owner) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"owner\"") + } + switch pc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pc.sqlSave(ctx) + case dialect.Neptune: + return pc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. +func (pc *PetCreate) SaveX(ctx context.Context) *Pet { + v, err := pc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (pc *PetCreate) sqlSave(ctx context.Context) (*Pet, error) { + var ( + res sql.Result + pe = &Pet{config: pc.config} + ) + tx, err := pc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(pet.Table).Default(pc.driver.Dialect()) + if pc.name != nil { + builder.Set(pet.FieldName, *pc.name) + pe.Name = *pc.name + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + pe.ID = strconv.FormatInt(id, 10) + if len(pc.team) > 0 { + eid, err := strconv.Atoi(keys(pc.team)[0]) + if err != nil { + return nil, err + } + query, args := sql.Update(pet.TeamTable). + Set(pet.TeamColumn, eid). + Where(sql.EQ(pet.FieldID, id).And().IsNull(pet.TeamColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(pc.team) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"Pet\"", keys(pc.team))}) + } + } + if len(pc.owner) > 0 { + for eid := range pc.owner { + eid, err := strconv.Atoi(eid) + if err != nil { + return nil, err + } + query, args := sql.Update(pet.OwnerTable). + Set(pet.OwnerColumn, eid). + Where(sql.EQ(pet.FieldID, id)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if err := tx.Commit(); err != nil { + return nil, err + } + return pe, nil +} + +func (pc *PetCreate) gremlinSave(ctx context.Context) (*Pet, error) { + res := &gremlin.Response{} + query, bindings := pc.gremlin().Query() + if err := pc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + pe := &Pet{config: pc.config} + if err := pe.FromResponse(res); err != nil { + return nil, err + } + return pe, nil +} + +func (pc *PetCreate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.AddV(pet.Label) + if pc.name != nil { + v.Property(dsl.Single, pet.FieldName, *pc.name) + } + for id := range pc.team { + v.AddE(user.TeamLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.TeamLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(pet.Label, user.TeamLabel, id)), + }) + } + for id := range pc.owner { + v.AddE(user.PetsLabel).From(g.V(id)).InV() + } + if len(constraints) == 0 { + return v.ValueMap(true) + } + tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true)) + for _, cr := range constraints[1:] { + tr = cr.pred.Coalesce(cr.test, tr) + } + return tr +} diff --git a/entc/integration/ent/pet_delete.go b/entc/integration/ent/pet_delete.go new file mode 100644 index 000000000..39ab997d8 --- /dev/null +++ b/entc/integration/ent/pet_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/ent/pet" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// PetDelete is the builder for deleting a Pet entity. +type PetDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (pd *PetDelete) Where(ps ...ent.Predicate) *PetDelete { + pd.predicates = append(pd.predicates, ps...) + return pd +} + +// Exec executes the deletion query. +func (pd *PetDelete) Exec(ctx context.Context) error { + switch pd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pd.sqlExec(ctx) + case dialect.Neptune: + return pd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pd *PetDelete) ExecX(ctx context.Context) { + if err := pd.Exec(ctx); err != nil { + panic(err) + } +} + +func (pd *PetDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(pet.Table)) + for _, p := range pd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(pet.Table).FromSelect(selector).Query() + return pd.driver.Exec(ctx, query, args, &res) +} + +func (pd *PetDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := pd.gremlin().Query() + return pd.driver.Exec(ctx, query, bindings, res) +} + +func (pd *PetDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(pet.Label) + for _, p := range pd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// PetDeleteOne is the builder for deleting a single Pet entity. +type PetDeleteOne struct { + pd *PetDelete +} + +// Exec executes the deletion query. +func (pdo *PetDeleteOne) Exec(ctx context.Context) error { + return pdo.pd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (pdo *PetDeleteOne) ExecX(ctx context.Context) { + pdo.pd.ExecX(ctx) +} diff --git a/entc/integration/ent/pet_query.go b/entc/integration/ent/pet_query.go new file mode 100644 index 000000000..1e8233e53 --- /dev/null +++ b/entc/integration/ent/pet_query.go @@ -0,0 +1,637 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// PetQuery is the builder for querying Pet entities. +type PetQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. 
+ sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (pq *PetQuery) Where(ps ...ent.Predicate) *PetQuery { + pq.predicates = append(pq.predicates, ps...) + return pq +} + +// Limit adds a limit step to the query. +func (pq *PetQuery) Limit(limit int) *PetQuery { + pq.limit = &limit + return pq +} + +// Order adds an order step to the query. +func (pq *PetQuery) Order(o ...Order) *PetQuery { + pq.order = append(pq.order, o...) + return pq +} + +// QueryTeam chains the current query on the team edge. +func (pq *PetQuery) QueryTeam() *UserQuery { + query := &UserQuery{config: pq.config} + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := pq.sqlQuery() + t2.Select(t2.C(pet.TeamColumn)) + query.sql = sql.Select(t1.Columns(user.Columns...)...). + From(t1). + Join(t2). + On(t1.C(user.FieldID), t2.C(pet.TeamColumn)) + case dialect.Neptune: + gremlin := pq.gremlinQuery() + query.gremlin = gremlin.InE(user.TeamLabel).OutV() + } + return query +} + +// QueryOwner chains the current query on the owner edge. +func (pq *PetQuery) QueryOwner() *UserQuery { + query := &UserQuery{config: pq.config} + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := pq.sqlQuery() + t2.Select(t2.C(pet.OwnerColumn)) + query.sql = sql.Select(t1.Columns(user.Columns...)...). + From(t1). + Join(t2). + On(t1.C(user.FieldID), t2.C(pet.OwnerColumn)) + case dialect.Neptune: + gremlin := pq.gremlinQuery() + query.gremlin = gremlin.InE(user.PetsLabel).OutV() + } + return query +} + +// Get returns a Pet entity by its id. +func (pq *PetQuery) Get(ctx context.Context, id string) (*Pet, error) { + return pq.Where(pet.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (pq *PetQuery) GetX(ctx context.Context, id string) *Pet { + pe, err := pq.Get(ctx, id) + if err != nil { + panic(err) + } + return pe +} + +// First returns the first Pet entity in the query. Returns *ErrNotFound when no pet was found. +func (pq *PetQuery) First(ctx context.Context) (*Pet, error) { + pes, err := pq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(pes) == 0 { + return nil, &ErrNotFound{pet.Label} + } + return pes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pq *PetQuery) FirstX(ctx context.Context) *Pet { + pe, err := pq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return pe +} + +// FirstID returns the first Pet id in the query. Returns *ErrNotFound when no id was found. +func (pq *PetQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = pq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{pet.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (pq *PetQuery) FirstXID(ctx context.Context) string { + id, err := pq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Pet entity in the query, returns an error if not exactly one entity was returned. +func (pq *PetQuery) Only(ctx context.Context) (*Pet, error) { + pes, err := pq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(pes) { + case 1: + return pes[0], nil + case 0: + return nil, &ErrNotFound{pet.Label} + default: + return nil, &ErrNotSingular{pet.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pq *PetQuery) OnlyX(ctx context.Context) *Pet { + pe, err := pq.Only(ctx) + if err != nil { + panic(err) + } + return pe +} + +// OnlyID returns the only Pet id in the query, returns an error if not exactly one id was returned. 
+func (pq *PetQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = pq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{pet.Label} + default: + err = &ErrNotSingular{pet.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (pq *PetQuery) OnlyXID(ctx context.Context) string { + id, err := pq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Pets. +func (pq *PetQuery) All(ctx context.Context) ([]*Pet, error) { + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pq.sqlAll(ctx) + case dialect.Neptune: + return pq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (pq *PetQuery) AllX(ctx context.Context) []*Pet { + pes, err := pq.All(ctx) + if err != nil { + panic(err) + } + return pes +} + +// IDs executes the query and returns a list of Pet ids. +func (pq *PetQuery) IDs(ctx context.Context) ([]string, error) { + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pq.sqlIDs(ctx) + case dialect.Neptune: + return pq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (pq *PetQuery) IDsX(ctx context.Context) []string { + ids, err := pq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pq *PetQuery) Count(ctx context.Context) (int, error) { + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pq.sqlCount(ctx) + case dialect.Neptune: + return pq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. 
+func (pq *PetQuery) CountX(ctx context.Context) int { + count, err := pq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (pq *PetQuery) Exist(ctx context.Context) (bool, error) { + switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pq.sqlExist(ctx) + case dialect.Neptune: + return pq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (pq *PetQuery) ExistX(ctx context.Context) bool { + exist, err := pq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Pet.Query(). +// GroupBy(pet.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (pq *PetQuery) GroupBy(field string, fields ...string) *PetGroupBy { + group := &PetGroupBy{config: pq.config} + group.fields = append([]string{field}, fields...) 
+ switch pq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = pq.sqlQuery() + case dialect.Neptune: + group.gremlin = pq.gremlinQuery() + } + return group +} + +func (pq *PetQuery) sqlAll(ctx context.Context) ([]*Pet, error) { + rows := &sql.Rows{} + selector := pq.sqlQuery() + if unique := pq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := pq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var pes Pets + if err := pes.FromRows(rows); err != nil { + return nil, err + } + pes.config(pq.config) + return pes, nil +} + +func (pq *PetQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := pq.sqlQuery() + unique := []string{pet.FieldID} + if len(pq.unique) > 0 { + unique = pq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := pq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (pq *PetQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := pq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (pq *PetQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := pq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (pq *PetQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(pet.Table) + selector := sql.Select(t1.Columns(pet.Columns...)...).From(t1) + if pq.sql != nil { + selector = pq.sql + selector.Select(selector.Columns(pet.Columns...)...) 
+ } + for _, p := range pq.predicates { + p.SQL(selector) + } + for _, p := range pq.order { + p.SQL(selector) + } + if limit := pq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (pq *PetQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := pq.gremlinQuery().Query() + if err := pq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (pq *PetQuery) gremlinAll(ctx context.Context) ([]*Pet, error) { + res := &gremlin.Response{} + query, bindings := pq.gremlinQuery().ValueMap(true).Query() + if err := pq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var pes Pets + if err := pes.FromResponse(res); err != nil { + return nil, err + } + pes.config(pq.config) + return pes, nil +} + +func (pq *PetQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := pq.gremlinQuery().Count().Query() + if err := pq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (pq *PetQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := pq.gremlinQuery().HasNext().Query() + if err := pq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (pq *PetQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(pet.Label) + if pq.gremlin != nil { + v = pq.gremlin.Clone() + } + for _, p := range pq.predicates { + p.Gremlin(v) + } + if len(pq.order) > 0 { + v.Order() + for _, p := range pq.order { + p.Gremlin(v) + } + } + if limit := pq.limit; limit != nil { + v.Limit(*limit) + } + if unique := pq.unique; len(unique) == 0 { + 
v.Dedup() 
+	}
+	return v
+}
+
+// PetGroupBy is the builder for group-by Pet entities.
+type PetGroupBy struct {
+	config
+	fields []string
+	fns []Aggregate
+	// intermediate queries.
+	sql *sql.Selector
+	gremlin *dsl.Traversal
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (pgb *PetGroupBy) Aggregate(fns ...Aggregate) *PetGroupBy {
+	pgb.fns = append(pgb.fns, fns...)
+	return pgb
+}
+
+// Scan applies the group-by query and scan the result into the given value.
+func (pgb *PetGroupBy) Scan(ctx context.Context, v interface{}) error {
+	switch pgb.driver.Dialect() {
+	case dialect.MySQL, dialect.SQLite:
+		return pgb.sqlScan(ctx, v)
+	case dialect.Neptune:
+		return pgb.gremlinScan(ctx, v)
+	default:
+		return errors.New("pgb: unsupported dialect")
+	}
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (pgb *PetGroupBy) ScanX(ctx context.Context, v interface{}) {
+	if err := pgb.Scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field.
+func (pgb *PetGroupBy) Strings(ctx context.Context) ([]string, error) {
+	if len(pgb.fields) > 1 {
+		return nil, errors.New("ent: PetGroupBy.Strings is not achievable when grouping more than 1 field")
+	}
+	var v []string
+	if err := pgb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (pgb *PetGroupBy) StringsX(ctx context.Context) []string {
+	v, err := pgb.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (pgb *PetGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(pgb.fields) > 1 { + return nil, errors.New("ent: PetGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := pgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (pgb *PetGroupBy) IntsX(ctx context.Context) []int { + v, err := pgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (pgb *PetGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(pgb.fields) > 1 { + return nil, errors.New("ent: PetGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := pgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (pgb *PetGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := pgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (pgb *PetGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(pgb.fields) > 1 { + return nil, errors.New("ent: PetGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := pgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (pgb *PetGroupBy) BoolsX(ctx context.Context) []bool { + v, err := pgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (pgb *PetGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := pgb.sqlQuery().Query() + if err := pgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (pgb *PetGroupBy) sqlQuery() *sql.Selector { + selector := pgb.sql + columns := make([]string, 0, len(pgb.fields)+len(pgb.fns)) + columns = append(columns, pgb.fields...) + for _, fn := range pgb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(pgb.fields...) +} + +func (pgb *PetGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := pgb.gremlinQuery().Query() + if err := pgb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(pgb.fields)+len(pgb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (pgb *PetGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range pgb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range pgb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return pgb.gremlin.Group(). + By(__.Values(pgb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/pet_update.go b/entc/integration/ent/pet_update.go new file mode 100644 index 000000000..84a3f7072 --- /dev/null +++ b/entc/integration/ent/pet_update.go @@ -0,0 +1,594 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// PetUpdate is the builder for updating Pet entities. +type PetUpdate struct { + config + name *string + team map[string]struct{} + owner map[string]struct{} + clearedTeam bool + clearedOwner bool + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (pu *PetUpdate) Where(ps ...ent.Predicate) *PetUpdate { + pu.predicates = append(pu.predicates, ps...) + return pu +} + +// SetName sets the name field. +func (pu *PetUpdate) SetName(s string) *PetUpdate { + pu.name = &s + return pu +} + +// SetTeamID sets the team edge to User by id. +func (pu *PetUpdate) SetTeamID(id string) *PetUpdate { + if pu.team == nil { + pu.team = make(map[string]struct{}) + } + pu.team[id] = struct{}{} + return pu +} + +// SetNillableTeamID sets the team edge to User by id if the given value is not nil. +func (pu *PetUpdate) SetNillableTeamID(id *string) *PetUpdate { + if id != nil { + pu = pu.SetTeamID(*id) + } + return pu +} + +// SetTeam sets the team edge to User. +func (pu *PetUpdate) SetTeam(u *User) *PetUpdate { + return pu.SetTeamID(u.ID) +} + +// SetOwnerID sets the owner edge to User by id. +func (pu *PetUpdate) SetOwnerID(id string) *PetUpdate { + if pu.owner == nil { + pu.owner = make(map[string]struct{}) + } + pu.owner[id] = struct{}{} + return pu +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (pu *PetUpdate) SetNillableOwnerID(id *string) *PetUpdate { + if id != nil { + pu = pu.SetOwnerID(*id) + } + return pu +} + +// SetOwner sets the owner edge to User. 
+func (pu *PetUpdate) SetOwner(u *User) *PetUpdate { + return pu.SetOwnerID(u.ID) +} + +// ClearTeam clears the team edge to User. +func (pu *PetUpdate) ClearTeam() *PetUpdate { + pu.clearedTeam = true + return pu +} + +// ClearOwner clears the owner edge to User. +func (pu *PetUpdate) ClearOwner() *PetUpdate { + pu.clearedOwner = true + return pu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (pu *PetUpdate) Save(ctx context.Context) (int, error) { + if len(pu.team) > 1 { + return 0, errors.New("ent: multiple assignments on a unique edge \"team\"") + } + if len(pu.owner) > 1 { + return 0, errors.New("ent: multiple assignments on a unique edge \"owner\"") + } + switch pu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return pu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := pu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (pu *PetUpdate) SaveX(ctx context.Context) int { + affected, err := pu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pu *PetUpdate) Exec(ctx context.Context) error { + _, err := pu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pu *PetUpdate) ExecX(ctx context.Context) { + if err := pu.Exec(ctx); err != nil { + panic(err) + } +} + +func (pu *PetUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(pet.FieldID).From(sql.Table(pet.Table)) + for _, p := range pu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = pu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := pu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(pet.Table).Where(sql.InInts(pet.FieldID, ids...)) + ) + if pu.name != nil { + update = true + builder.Set(pet.FieldName, *pu.name) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if pu.clearedTeam { + query, args := sql.Update(pet.TeamTable). + SetNull(pet.TeamColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(pu.team) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(pu.team)[0]) + if serr != nil { + return 0, err + } + query, args := sql.Update(pet.TeamTable). + Set(pet.TeamColumn, eid). + Where(sql.EQ(pet.FieldID, id).And().IsNull(pet.TeamColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(pu.team) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"Pet\"", keys(pu.team))}) + } + } + } + if pu.clearedOwner { + query, args := sql.Update(pet.OwnerTable). + SetNull(pet.OwnerColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(pu.owner) > 0 { + for eid := range pu.owner { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(pet.OwnerTable). + Set(pet.OwnerColumn, eid). + Where(sql.InInts(pet.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (pu *PetUpdate) gremlinSave(ctx context.Context) ([]*Pet, error) { + res := &gremlin.Response{} + query, bindings := pu.gremlin().Query() + if err := pu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var pes Pets + pes.config(pu.config) + if err := pes.FromResponse(res); err != nil { + return nil, err + } + return pes, nil +} + +func (pu *PetUpdate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.V().HasLabel(pet.Label) + for _, p := range pu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if pu.name != nil { + v.Property(dsl.Single, pet.FieldName, *pu.name) + } + if pu.clearedTeam { + tr := rv.Clone().InE(user.TeamLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range pu.team { + v.AddE(user.TeamLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.TeamLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(pet.Label, user.TeamLabel, id)), + }) + } + if pu.clearedOwner { + tr := rv.Clone().InE(user.PetsLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range pu.owner { + v.AddE(user.PetsLabel).From(g.V(id)).InV() + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// PetUpdateOne is the builder for updating a single Pet entity. +type PetUpdateOne struct { + config + id string + name *string + team map[string]struct{} + owner map[string]struct{} + clearedTeam bool + clearedOwner bool +} + +// SetName sets the name field. +func (puo *PetUpdateOne) SetName(s string) *PetUpdateOne { + puo.name = &s + return puo +} + +// SetTeamID sets the team edge to User by id. +func (puo *PetUpdateOne) SetTeamID(id string) *PetUpdateOne { + if puo.team == nil { + puo.team = make(map[string]struct{}) + } + puo.team[id] = struct{}{} + return puo +} + +// SetNillableTeamID sets the team edge to User by id if the given value is not nil. 
+func (puo *PetUpdateOne) SetNillableTeamID(id *string) *PetUpdateOne { + if id != nil { + puo = puo.SetTeamID(*id) + } + return puo +} + +// SetTeam sets the team edge to User. +func (puo *PetUpdateOne) SetTeam(u *User) *PetUpdateOne { + return puo.SetTeamID(u.ID) +} + +// SetOwnerID sets the owner edge to User by id. +func (puo *PetUpdateOne) SetOwnerID(id string) *PetUpdateOne { + if puo.owner == nil { + puo.owner = make(map[string]struct{}) + } + puo.owner[id] = struct{}{} + return puo +} + +// SetNillableOwnerID sets the owner edge to User by id if the given value is not nil. +func (puo *PetUpdateOne) SetNillableOwnerID(id *string) *PetUpdateOne { + if id != nil { + puo = puo.SetOwnerID(*id) + } + return puo +} + +// SetOwner sets the owner edge to User. +func (puo *PetUpdateOne) SetOwner(u *User) *PetUpdateOne { + return puo.SetOwnerID(u.ID) +} + +// ClearTeam clears the team edge to User. +func (puo *PetUpdateOne) ClearTeam() *PetUpdateOne { + puo.clearedTeam = true + return puo +} + +// ClearOwner clears the owner edge to User. +func (puo *PetUpdateOne) ClearOwner() *PetUpdateOne { + puo.clearedOwner = true + return puo +} + +// Save executes the query and returns the updated entity. +func (puo *PetUpdateOne) Save(ctx context.Context) (*Pet, error) { + if len(puo.team) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"team\"") + } + if len(puo.owner) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"owner\"") + } + switch puo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return puo.sqlSave(ctx) + case dialect.Neptune: + return puo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (puo *PetUpdateOne) SaveX(ctx context.Context) *Pet { + pe, err := puo.Save(ctx) + if err != nil { + panic(err) + } + return pe +} + +// Exec executes the query on the entity. 
+func (puo *PetUpdateOne) Exec(ctx context.Context) error { + _, err := puo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (puo *PetUpdateOne) ExecX(ctx context.Context) { + if err := puo.Exec(ctx); err != nil { + panic(err) + } +} + +func (puo *PetUpdateOne) sqlSave(ctx context.Context) (pe *Pet, err error) { + selector := sql.Select(pet.Columns...).From(sql.Table(pet.Table)) + pet.ID(puo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = puo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + pe = &Pet{config: puo.config} + if err := pe.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into Pet: %v", err) + } + id = pe.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: Pet not found with id: %v", puo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one Pet with the same id: %v", puo.id) + } + + tx, err := puo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = sql.Update(pet.Table).Where(sql.InInts(pet.FieldID, ids...)) + ) + if puo.name != nil { + update = true + builder.Set(pet.FieldName, *puo.name) + pe.Name = *puo.name + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if puo.clearedTeam { + query, args := sql.Update(pet.TeamTable). + SetNull(pet.TeamColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(puo.team) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(puo.team)[0]) + if serr != nil { + return nil, err + } + query, args := sql.Update(pet.TeamTable). + Set(pet.TeamColumn, eid). 
+ Where(sql.EQ(pet.FieldID, id).And().IsNull(pet.TeamColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(puo.team) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"Pet\"", keys(puo.team))}) + } + } + } + if puo.clearedOwner { + query, args := sql.Update(pet.OwnerTable). + SetNull(pet.OwnerColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(puo.owner) > 0 { + for eid := range puo.owner { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(pet.OwnerTable). + Set(pet.OwnerColumn, eid). + Where(sql.InInts(pet.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return pe, nil +} + +func (puo *PetUpdateOne) gremlinSave(ctx context.Context) (*Pet, error) { + res := &gremlin.Response{} + query, bindings := puo.gremlin(puo.id).Query() + if err := puo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + pe := &Pet{config: puo.config} + if err := pe.FromResponse(res); err != nil { + return nil, err + } + return pe, nil +} + +func (puo *PetUpdateOne) gremlin(id string) *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 1) + v := g.V(id) + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if puo.name != nil { + v.Property(dsl.Single, pet.FieldName, *puo.name) + } + if puo.clearedTeam { + tr := rv.Clone().InE(user.TeamLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range puo.team { + v.AddE(user.TeamLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.TeamLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(pet.Label, user.TeamLabel, id)), + }) + } + if puo.clearedOwner { + tr := rv.Clone().InE(user.PetsLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range puo.owner { + v.AddE(user.PetsLabel).From(g.V(id)).InV() + } + v.ValueMap(true) + if len(constraints) > 0 { + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/ent/schema/card.go b/entc/integration/ent/schema/card.go new file mode 100644 index 000000000..84625d48d --- /dev/null +++ b/entc/integration/ent/schema/card.go @@ -0,0 +1,30 @@ +package schema + +import ( + "fbc/ent" + "fbc/ent/edge" + "fbc/ent/field" +) + +// Card holds the schema definition for the CreditCard entity. +type Card struct { + ent.Schema +} + +// Fields of the Comment. +func (Card) Fields() []ent.Field { + return []ent.Field{ + field.String("number"). + MinLen(1), + } +} + +// Edges of the Comment. +func (Card) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", User.Type). + Comment("O2O inverse edge"). + Ref("card"). 
+ Unique(), + } +} diff --git a/entc/integration/ent/schema/comment.go b/entc/integration/ent/schema/comment.go new file mode 100644 index 000000000..65288c8e6 --- /dev/null +++ b/entc/integration/ent/schema/comment.go @@ -0,0 +1,18 @@ +package schema + +import "fbc/ent" + +// Comment holds the schema definition for the Comment entity. +type Comment struct { + ent.Schema +} + +// Fields of the Comment. +func (Comment) Fields() []ent.Field { + return nil +} + +// Edges of the Comment. +func (Comment) Edges() []ent.Edge { + return nil +} diff --git a/entc/integration/ent/schema/file.go b/entc/integration/ent/schema/file.go new file mode 100644 index 000000000..326252a49 --- /dev/null +++ b/entc/integration/ent/schema/file.go @@ -0,0 +1,20 @@ +package schema + +import ( + "fbc/ent" + "fbc/ent/field" +) + +// File holds the schema definition for the File entity. +type File struct { + ent.Schema +} + +// Fields of the File. +func (File) Fields() []ent.Field { + return []ent.Field{ + field.Int("size"). + Positive(), + field.String("name"), + } +} diff --git a/entc/integration/ent/schema/group.go b/entc/integration/ent/schema/group.go new file mode 100644 index 000000000..461117966 --- /dev/null +++ b/entc/integration/ent/schema/group.go @@ -0,0 +1,52 @@ +package schema + +import ( + "errors" + "regexp" + "strings" + + "fbc/ent" + "fbc/ent/edge" + "fbc/ent/field" +) + +// Group holds the schema for the group entity. +type Group struct { + ent.Schema +} + +// Fields of the group. +func (Group) Fields() []ent.Field { + return []ent.Field{ + field.Bool("active"). + Default(true), + field.Time("expire"), + field.String("type"). + Optional(). + Nullable(). + MinLen(3), + field.Int("max_users"). + Optional(). + Positive(). + Default(10), + field.String("name"). + Comment("field with multiple validators"). + Match(regexp.MustCompile("[a-zA-Z_]+$")). 
+ Validate(func(s string) error { + if strings.ToLower(s) == s { + return errors.New("last name must begin with uppercase") + } + return nil + }), + } +} + +// Edges of the group. +func (Group) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("files", File.Type), + edge.To("blocked", User.Type), + edge.From("users", User.Type).Ref("groups"), + edge.To("info", GroupInfo.Type).Unique().Required(), + } +} diff --git a/entc/integration/ent/schema/groupinfo.go b/entc/integration/ent/schema/groupinfo.go new file mode 100644 index 000000000..f3a168a2d --- /dev/null +++ b/entc/integration/ent/schema/groupinfo.go @@ -0,0 +1,29 @@ +package schema + +import ( + "fbc/ent" + "fbc/ent/edge" + "fbc/ent/field" +) + +// GroupInfo holds the schema for the group-info entity. +type GroupInfo struct { + ent.Schema +} + +// Fields of the group. +func (GroupInfo) Fields() []ent.Field { + return []ent.Field{ + field.String("desc"), + field.Int("max_users"). + Default(1e4), + } +} + +// Edges of the group. +func (GroupInfo) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("groups", Group.Type). + Ref("info"), + } +} diff --git a/entc/integration/ent/schema/node.go b/entc/integration/ent/schema/node.go new file mode 100644 index 000000000..2770a951d --- /dev/null +++ b/entc/integration/ent/schema/node.go @@ -0,0 +1,30 @@ +package schema + +import ( + "fbc/ent" + "fbc/ent/edge" + "fbc/ent/field" +) + +// Node holds the schema definition for the linked-list Node entity. +type Node struct { + ent.Schema +} + +// Fields of the Node. +func (Node) Fields() []ent.Field { + return []ent.Field{ + field.Int("value"). + Optional(), + } +} + +// Edges of the Node. +func (Node) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("next", Node.Type). + Unique(). + From("prev"). 
+			Unique(),
+	}
+}
diff --git a/entc/integration/ent/schema/pet.go b/entc/integration/ent/schema/pet.go
new file mode 100644
index 000000000..0341728e1
--- /dev/null
+++ b/entc/integration/ent/schema/pet.go
@@ -0,0 +1,30 @@
+package schema
+
+import (
+	"fbc/ent"
+	"fbc/ent/edge"
+	"fbc/ent/field"
+)
+
+// Pet holds the schema definition for the Pet entity.
+type Pet struct {
+	ent.Schema
+}
+
+// Fields of the Pet.
+func (Pet) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("name"),
+	}
+}
+
+// Edges of the Pet.
+func (Pet) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("team", User.Type).
+			Unique().Ref("team"),
+		edge.From("owner", User.Type).
+			Unique().
+			Ref("pets"),
+	}
+}
diff --git a/entc/integration/ent/schema/user.go b/entc/integration/ent/schema/user.go
new file mode 100644
index 000000000..7759eb7d0
--- /dev/null
+++ b/entc/integration/ent/schema/user.go
@@ -0,0 +1,47 @@
+package schema
+
+import (
+	"fbc/ent"
+	"fbc/ent/edge"
+	"fbc/ent/field"
+)
+
+// User holds the schema for the user entity.
+type User struct {
+	ent.Schema
+}
+
+// Fields of the user.
+func (User) Fields() []ent.Field {
+	return []ent.Field{
+		field.Int("age"),
+		field.String("name").
+			StructTag(`json:"first_name" graphql:"first_name"`),
+		field.String("last").
+			Default("unknown").
+			StructTag(`graphql:"last_name"`),
+		field.String("nickname").
+			Optional().
+			Unique(),
+		field.String("phone").
+			Optional().
+			Unique(),
+	}
+}
+
+// Edges of the user.
+func (User) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("card", Card.Type).
+			Comment("O2O edge"). 
+ Unique(), + edge.To("pets", Pet.Type), + edge.To("files", File.Type), + edge.To("groups", Group.Type), + edge.To("friends", User.Type), + edge.To("following", User.Type).From("followers"), + edge.To("team", Pet.Type).Unique(), + edge.To("spouse", User.Type).Unique(), + edge.To("parent", User.Type).Unique().From("children"), + } +} diff --git a/entc/integration/ent/tx.go b/entc/integration/ent/tx.go new file mode 100644 index 000000000..a137f3f49 --- /dev/null +++ b/entc/integration/ent/tx.go @@ -0,0 +1,122 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "fbc/ent/dialect" + "fbc/ent/entc/integration/ent/migrate" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Card is the client for interacting with the Card builders. + Card *CardClient + // Comment is the client for interacting with the Comment builders. + Comment *CommentClient + // File is the client for interacting with the File builders. + File *FileClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // GroupInfo is the client for interacting with the GroupInfo builders. + GroupInfo *GroupInfoClient + // Node is the client for interacting with the Node builders. + Node *NodeClient + // Pet is the client for interacting with the Pet builders. + Pet *PetClient + // User is the client for interacting with the User builders. + User *UserClient +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + return tx.config.driver.(*txDriver).tx.Commit() +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + return tx.config.driver.(*txDriver).tx.Rollback() +} + +// Client returns a Client that binds to current transaction. 
+func (tx *Tx) Client() *Client { + return &Client{ + config: tx.config, + Schema: migrate.NewSchema(tx.driver), + Card: NewCardClient(tx.config), + Comment: NewCommentClient(tx.config), + File: NewFileClient(tx.config), + Group: NewGroupClient(tx.config), + GroupInfo: NewGroupInfoClient(tx.config), + Node: NewNodeClient(tx.config), + Pet: NewPetClient(tx.config), + User: NewUserClient(tx.config), + } +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Card.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that this driver is safe for concurrent usage, however, it executes only one query +// at the time. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // protects the tx below from concurrent execution. + mu sync.Mutex + // tx is the underlying transaction. + tx dialect.Tx +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. 
+func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/entc/integration/ent/user.go b/entc/integration/ent/user.go new file mode 100644 index 000000000..9425fbca3 --- /dev/null +++ b/entc/integration/ent/user.go @@ -0,0 +1,234 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// User is the model entity for the User schema. +type User struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` + // Age holds the value of the "age" field. + Age int `json:"age,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"first_name" graphql:"first_name"` + // Last holds the value of the "last" field. + Last string `json:"last,omitempty" graphql:"last_name"` + // Nickname holds the value of the "nickname" field. + Nickname string `json:"nickname,omitempty"` + // Phone holds the value of the "phone" field. + Phone string `json:"phone,omitempty"` +} + +// FromResponse scans the gremlin response data into User. 
+func (u *User) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vu struct { + ID string `json:"id,omitempty"` + Age int `json:"age,omitempty"` + Name string `json:"name,omitempty"` + Last string `json:"last,omitempty"` + Nickname string `json:"nickname,omitempty"` + Phone string `json:"phone,omitempty"` + } + if err := vmap.Decode(&vu); err != nil { + return err + } + u.ID = vu.ID + u.Age = vu.Age + u.Name = vu.Name + u.Last = vu.Last + u.Nickname = vu.Nickname + u.Phone = vu.Phone + return nil +} + +// FromRows scans the sql response data into User. +func (u *User) FromRows(rows *sql.Rows) error { + var vu struct { + ID int + Age int + Name string + Last string + Nickname sql.NullString + Phone sql.NullString + } + // the order here should be the same as in the `user.Columns`. + if err := rows.Scan( + &vu.ID, + &vu.Age, + &vu.Name, + &vu.Last, + &vu.Nickname, + &vu.Phone, + ); err != nil { + return err + } + u.ID = strconv.Itoa(vu.ID) + u.Age = vu.Age + u.Name = vu.Name + u.Last = vu.Last + u.Nickname = vu.Nickname.String + u.Phone = vu.Phone.String + return nil +} + +// QueryCard queries the card edge of the User. +func (u *User) QueryCard() *CardQuery { + return (&UserClient{u.config}).QueryCard(u) +} + +// QueryPets queries the pets edge of the User. +func (u *User) QueryPets() *PetQuery { + return (&UserClient{u.config}).QueryPets(u) +} + +// QueryFiles queries the files edge of the User. +func (u *User) QueryFiles() *FileQuery { + return (&UserClient{u.config}).QueryFiles(u) +} + +// QueryGroups queries the groups edge of the User. +func (u *User) QueryGroups() *GroupQuery { + return (&UserClient{u.config}).QueryGroups(u) +} + +// QueryFriends queries the friends edge of the User. +func (u *User) QueryFriends() *UserQuery { + return (&UserClient{u.config}).QueryFriends(u) +} + +// QueryFollowers queries the followers edge of the User. 
+func (u *User) QueryFollowers() *UserQuery { + return (&UserClient{u.config}).QueryFollowers(u) +} + +// QueryFollowing queries the following edge of the User. +func (u *User) QueryFollowing() *UserQuery { + return (&UserClient{u.config}).QueryFollowing(u) +} + +// QueryTeam queries the team edge of the User. +func (u *User) QueryTeam() *PetQuery { + return (&UserClient{u.config}).QueryTeam(u) +} + +// QuerySpouse queries the spouse edge of the User. +func (u *User) QuerySpouse() *UserQuery { + return (&UserClient{u.config}).QuerySpouse(u) +} + +// QueryChildren queries the children edge of the User. +func (u *User) QueryChildren() *UserQuery { + return (&UserClient{u.config}).QueryChildren(u) +} + +// QueryParent queries the parent edge of the User. +func (u *User) QueryParent() *UserQuery { + return (&UserClient{u.config}).QueryParent(u) +} + +// Update returns a builder for updating this User. +// Note that, you need to call User.Unwrap() before calling this method, if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (u *User) Update() *UserUpdateOne { + return (&UserClient{u.config}).UpdateOne(u) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (u *User) Unwrap() *User { + tx, ok := u.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + u.config.driver = tx.drv + return u +} + +// String implements the fmt.Stringer. 
+func (u *User) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("User(") + buf.WriteString(fmt.Sprintf("id=%v,", u.ID)) + buf.WriteString(fmt.Sprintf("age=%v", u.Age)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("name=%v", u.Name)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("last=%v", u.Last)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("nickname=%v", u.Nickname)) + buf.WriteString(", ") + buf.WriteString(fmt.Sprintf("phone=%v", u.Phone)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (u *User) id() int { + id, _ := strconv.Atoi(u.ID) + return id +} + +// Users is a parsable slice of User. +type Users []*User + +// FromResponse scans the gremlin response data into Users. +func (u *Users) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vu []struct { + ID string `json:"id,omitempty"` + Age int `json:"age,omitempty"` + Name string `json:"name,omitempty"` + Last string `json:"last,omitempty"` + Nickname string `json:"nickname,omitempty"` + Phone string `json:"phone,omitempty"` + } + if err := vmap.Decode(&vu); err != nil { + return err + } + for _, v := range vu { + *u = append(*u, &User{ + ID: v.ID, + Age: v.Age, + Name: v.Name, + Last: v.Last, + Nickname: v.Nickname, + Phone: v.Phone, + }) + } + return nil +} + +// FromRows scans the sql response data into Users. +func (u *Users) FromRows(rows *sql.Rows) error { + for rows.Next() { + vu := &User{} + if err := vu.FromRows(rows); err != nil { + return err + } + *u = append(*u, vu) + } + return nil +} + +func (u Users) config(cfg config) { + for i := range u { + u[i].config = cfg + } +} diff --git a/entc/integration/ent/user/user.go b/entc/integration/ent/user/user.go new file mode 100644 index 000000000..11af89ae9 --- /dev/null +++ b/entc/integration/ent/user/user.go @@ -0,0 +1,122 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package user + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // CardLabel holds the string label denoting the card edge type in the database. + CardLabel = "user_card" + // PetsLabel holds the string label denoting the pets edge type in the database. + PetsLabel = "user_pets" + // FilesLabel holds the string label denoting the files edge type in the database. + FilesLabel = "user_files" + // GroupsLabel holds the string label denoting the groups edge type in the database. + GroupsLabel = "user_groups" + // FriendsLabel holds the string label denoting the friends edge type in the database. + FriendsLabel = "user_friends" + // FollowersInverseLabel holds the string label denoting the followers inverse edge type in the database. + FollowersInverseLabel = "user_following" + // FollowingLabel holds the string label denoting the following edge type in the database. + FollowingLabel = "user_following" + // TeamLabel holds the string label denoting the team edge type in the database. + TeamLabel = "user_team" + // SpouseLabel holds the string label denoting the spouse edge type in the database. + SpouseLabel = "user_spouse" + // ChildrenInverseLabel holds the string label denoting the children inverse edge type in the database. + ChildrenInverseLabel = "user_parent" + // ParentLabel holds the string label denoting the parent edge type in the database. + ParentLabel = "user_parent" + // FieldAge holds the string denoting the age vertex property in the database. + FieldAge = "age" + // FieldName holds the string denoting the name vertex property in the database. + FieldName = "name" + // FieldLast holds the string denoting the last vertex property in the database. + FieldLast = "last" + // DefaultLast holds the default value for the last field. + DefaultLast = "unknown" + // FieldNickname holds the string denoting the nickname vertex property in the database. 
+ FieldNickname = "nickname" + // FieldPhone holds the string denoting the phone vertex property in the database. + FieldPhone = "phone" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the user in the database. + Table = "users" + // CardTable is the table the holds the card relation/edge. + CardTable = "cards" + // CardInverseTable is the table name for the Card entity. + // It exists in this package in order to avoid circular dependency with the "card" package. + CardInverseTable = "cards" + // CardColumn is the table column denoting the card relation/edge. + CardColumn = "owner_id" + // PetsTable is the table the holds the pets relation/edge. + PetsTable = "pets" + // PetsInverseTable is the table name for the Pet entity. + // It exists in this package in order to avoid circular dependency with the "pet" package. + PetsInverseTable = "pets" + // PetsColumn is the table column denoting the pets relation/edge. + PetsColumn = "owner_id" + // FilesTable is the table the holds the files relation/edge. + FilesTable = "files" + // FilesInverseTable is the table name for the File entity. + // It exists in this package in order to avoid circular dependency with the "file" package. + FilesInverseTable = "files" + // FilesColumn is the table column denoting the files relation/edge. + FilesColumn = "user_file_id" + // GroupsTable is the table the holds the groups relation/edge. The primary key declared below. + GroupsTable = "user_groups" + // GroupsInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupsInverseTable = "groups" + // FriendsTable is the table the holds the friends relation/edge. The primary key declared below. + FriendsTable = "user_friends" + // FollowersTable is the table the holds the followers relation/edge. The primary key declared below. 
+ FollowersTable = "user_following" + // FollowingTable is the table the holds the following relation/edge. The primary key declared below. + FollowingTable = "user_following" + // TeamTable is the table the holds the team relation/edge. + TeamTable = "pets" + // TeamInverseTable is the table name for the Pet entity. + // It exists in this package in order to avoid circular dependency with the "pet" package. + TeamInverseTable = "pets" + // TeamColumn is the table column denoting the team relation/edge. + TeamColumn = "team_id" + // SpouseTable is the table the holds the spouse relation/edge. + SpouseTable = "users" + // SpouseColumn is the table column denoting the spouse relation/edge. + SpouseColumn = "user_spouse_id" + // ChildrenTable is the table the holds the children relation/edge. + ChildrenTable = "users" + // ChildrenColumn is the table column denoting the children relation/edge. + ChildrenColumn = "parent_id" + // ParentTable is the table the holds the parent relation/edge. + ParentTable = "users" + // ParentColumn is the table column denoting the parent relation/edge. + ParentColumn = "parent_id" +) + +// Columns holds all SQL columns are user fields. +var Columns = []string{ + FieldID, + FieldAge, + FieldName, + FieldLast, + FieldNickname, + FieldPhone, +} + +var ( + // GroupsPrimaryKey and GroupsColumn2 are the table columns denoting the + // primary key for the groups relation (M2M). + GroupsPrimaryKey = []string{"user_id", "group_id"} + // FriendsPrimaryKey and FriendsColumn2 are the table columns denoting the + // primary key for the friends relation (M2M). + FriendsPrimaryKey = []string{"user_id", "friend_id"} + // FollowersPrimaryKey and FollowersColumn2 are the table columns denoting the + // primary key for the followers relation (M2M). + FollowersPrimaryKey = []string{"user_id", "follower_id"} + // FollowingPrimaryKey and FollowingColumn2 are the table columns denoting the + // primary key for the following relation (M2M). 
+ FollowingPrimaryKey = []string{"user_id", "follower_id"} +) diff --git a/entc/integration/ent/user/where.go b/entc/integration/ent/user/where.go new file mode 100644 index 000000000..3fd1cd92e --- /dev/null +++ b/entc/integration/ent/user/where.go @@ -0,0 +1,1417 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package user + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} + +// Age applies equality check predicate on the "age" field. It's identical to AgeEQ. 
+func Age(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.EQ(v)) + }, + } +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// Last applies equality check predicate on the "last" field. It's identical to LastEQ. +func Last(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.EQ(v)) + }, + } +} + +// Nickname applies equality check predicate on the "nickname" field. It's identical to NicknameEQ. +func Nickname(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.EQ(v)) + }, + } +} + +// Phone applies equality check predicate on the "phone" field. It's identical to PhoneEQ. +func Phone(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPhone), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldPhone, p.EQ(v)) + }, + } +} + +// AgeEQ applies the EQ predicate on the "age" field. +func AgeEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.EQ(v)) + }, + } +} + +// AgeNEQ applies the NEQ predicate on the "age" field. 
+func AgeNEQ(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.NEQ(v)) + }, + } +} + +// AgeGT applies the GT predicate on the "age" field. +func AgeGT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.GT(v)) + }, + } +} + +// AgeGTE applies the GTE predicate on the "age" field. +func AgeGTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.GTE(v)) + }, + } +} + +// AgeLT applies the LT predicate on the "age" field. +func AgeLT(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.LT(v)) + }, + } +} + +// AgeLTE applies the LTE predicate on the "age" field. +func AgeLTE(v int) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldAge), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.LTE(v)) + }, + } +} + +// AgeIn applies the In predicate on the "age" field. +func AgeIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldAge), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.Within(v...)) + }, + } +} + +// AgeNotIn applies the NotIn predicate on the "age" field. 
+func AgeNotIn(vs ...int) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldAge), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldAge, p.Without(v...)) + }, + } +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EQ(v)) + }, + } +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.NEQ(v)) + }, + } +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GT(v)) + }, + } +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.GTE(v)) + }, + } +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LT(v)) + }, + } +} + +// NameLTE applies the LTE predicate on the "name" field. 
+func NameLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.LTE(v)) + }, + } +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Within(v...)) + }, + } +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldName), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Without(v...)) + }, + } +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.Containing(v)) + }, + } +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. 
+func NameHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.StartingWith(v)) + }, + } +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldName, p.EndingWith(v)) + }, + } +} + +// LastEQ applies the EQ predicate on the "last" field. +func LastEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.EQ(v)) + }, + } +} + +// LastNEQ applies the NEQ predicate on the "last" field. +func LastNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.NEQ(v)) + }, + } +} + +// LastGT applies the GT predicate on the "last" field. +func LastGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.GT(v)) + }, + } +} + +// LastGTE applies the GTE predicate on the "last" field. +func LastGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.GTE(v)) + }, + } +} + +// LastLT applies the LT predicate on the "last" field. 
+func LastLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.LT(v)) + }, + } +} + +// LastLTE applies the LTE predicate on the "last" field. +func LastLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.LTE(v)) + }, + } +} + +// LastIn applies the In predicate on the "last" field. +func LastIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldLast), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.Within(v...)) + }, + } +} + +// LastNotIn applies the NotIn predicate on the "last" field. +func LastNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldLast), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.Without(v...)) + }, + } +} + +// LastContains applies the Contains predicate on the "last" field. 
+func LastContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.Containing(v)) + }, + } +} + +// LastHasPrefix applies the HasPrefix predicate on the "last" field. +func LastHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.StartingWith(v)) + }, + } +} + +// LastHasSuffix applies the HasSuffix predicate on the "last" field. +func LastHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldLast), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldLast, p.EndingWith(v)) + }, + } +} + +// NicknameEQ applies the EQ predicate on the "nickname" field. +func NicknameEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.EQ(v)) + }, + } +} + +// NicknameNEQ applies the NEQ predicate on the "nickname" field. +func NicknameNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.NEQ(v)) + }, + } +} + +// NicknameGT applies the GT predicate on the "nickname" field. +func NicknameGT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.GT(v)) + }, + } +} + +// NicknameGTE applies the GTE predicate on the "nickname" field. 
+func NicknameGTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.GTE(v)) + }, + } +} + +// NicknameLT applies the LT predicate on the "nickname" field. +func NicknameLT(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.LT(v)) + }, + } +} + +// NicknameLTE applies the LTE predicate on the "nickname" field. +func NicknameLTE(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.LTE(v)) + }, + } +} + +// NicknameIn applies the In predicate on the "nickname" field. +func NicknameIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldNickname), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.Within(v...)) + }, + } +} + +// NicknameNotIn applies the NotIn predicate on the "nickname" field. +func NicknameNotIn(vs ...string) ent.Predicate { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(vs) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldNickname), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.Without(v...)) + }, + } +} + +// NicknameContains applies the Contains predicate on the "nickname" field. +func NicknameContains(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.Containing(v)) + }, + } +} + +// NicknameHasPrefix applies the HasPrefix predicate on the "nickname" field. +func NicknameHasPrefix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.StartingWith(v)) + }, + } +} + +// NicknameHasSuffix applies the HasSuffix predicate on the "nickname" field. +func NicknameHasSuffix(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldNickname), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldNickname, p.EndingWith(v)) + }, + } +} + +// PhoneEQ applies the EQ predicate on the "phone" field. +func PhoneEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPhone), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldPhone, p.EQ(v)) + }, + } +} + +// PhoneNEQ applies the NEQ predicate on the "phone" field. +func PhoneNEQ(v string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldPhone), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.Has(Label, FieldPhone, p.NEQ(v)) + }, + } +} + +// PhoneGT applies the GT predicate on the "phone" field. 
+func PhoneGT(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.GT(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.GT(v))
+		},
+	}
+}
+
+// PhoneGTE applies the GTE predicate on the "phone" field.
+func PhoneGTE(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.GTE(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.GTE(v))
+		},
+	}
+}
+
+// PhoneLT applies the LT predicate on the "phone" field.
+func PhoneLT(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.LT(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.LT(v))
+		},
+	}
+}
+
+// PhoneLTE applies the LTE predicate on the "phone" field.
+func PhoneLTE(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.LTE(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.LTE(v))
+		},
+	}
+}
+
+// PhoneIn applies the In predicate on the "phone" field.
+func PhoneIn(vs ...string) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+			if len(vs) == 0 {
+				s.Where(sql.False())
+				return
+			}
+			s.Where(sql.In(s.C(FieldPhone), v...))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.Within(v...))
+		},
+	}
+}
+
+// PhoneNotIn applies the NotIn predicate on the "phone" field.
+func PhoneNotIn(vs ...string) ent.Predicate {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			// if no arguments were provided, append the FALSE constant,
+			// since we can't apply "IN ()". This will make this predicate falsy.
+			if len(vs) == 0 {
+				s.Where(sql.False())
+				return
+			}
+			s.Where(sql.NotIn(s.C(FieldPhone), v...))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.Without(v...))
+		},
+	}
+}
+
+// PhoneContains applies the Contains predicate on the "phone" field.
+func PhoneContains(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.Contains(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.Containing(v))
+		},
+	}
+}
+
+// PhoneHasPrefix applies the HasPrefix predicate on the "phone" field.
+func PhoneHasPrefix(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.HasPrefix(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.StartingWith(v))
+		},
+	}
+}
+
+// PhoneHasSuffix applies the HasSuffix predicate on the "phone" field.
+func PhoneHasSuffix(v string) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			s.Where(sql.HasSuffix(s.C(FieldPhone), v))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.Has(Label, FieldPhone, p.EndingWith(v))
+		},
+	}
+}
+
+// HasCard applies the HasEdge predicate on the "card" edge.
+func HasCard() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			// match users whose id appears as a non-null foreign key in the card table.
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(CardColumn).
+						From(sql.Table(CardTable)).
+						Where(sql.NotNull(CardColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(CardLabel).OutV()
+		},
+	}
+}
+
+// HasCardWith applies the HasEdge predicate on the "card" edge with the given conditions (other predicates).
+func HasCardWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			// apply the given predicates to the sub-query over the card table.
+			t2 := sql.Select(CardColumn).From(sql.Table(CardTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(CardLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasPets applies the HasEdge predicate on the "pets" edge.
+func HasPets() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(PetsColumn).
+						From(sql.Table(PetsTable)).
+						Where(sql.NotNull(PetsColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(PetsLabel).OutV()
+		},
+	}
+}
+
+// HasPetsWith applies the HasEdge predicate on the "pets" edge with the given conditions (other predicates).
+func HasPetsWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(PetsColumn).From(sql.Table(PetsTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(PetsLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasFiles applies the HasEdge predicate on the "files" edge.
+func HasFiles() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(FilesColumn).
+						From(sql.Table(FilesTable)).
+						Where(sql.NotNull(FilesColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(FilesLabel).OutV()
+		},
+	}
+}
+
+// HasFilesWith applies the HasEdge predicate on the "files" edge with the given conditions (other predicates).
+func HasFilesWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(FilesColumn).From(sql.Table(FilesTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(FilesLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasGroups applies the HasEdge predicate on the "groups" edge.
+func HasGroups() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(GroupsPrimaryKey[0]).From(sql.Table(GroupsTable)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(GroupsLabel).OutV()
+		},
+	}
+}
+
+// HasGroupsWith applies the HasEdge predicate on the "groups" edge with the given conditions (other predicates).
+func HasGroupsWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			// join the M2M association table (t3) with the inverse-side table (t2),
+			// apply the given predicates to the inverse side (t5), and select the
+			// matching owner ids from the association table (t4).
+			t2 := sql.Table(GroupsInverseTable)
+			t3 := sql.Table(GroupsTable)
+			t4 := sql.Select(t3.C(GroupsPrimaryKey[0])).
+				From(t3).
+				Join(t2).
+				On(t3.C(GroupsPrimaryKey[1]), t2.C(FieldID))
+			t5 := sql.Select().From(t2)
+			for _, p := range preds {
+				p.SQL(t5)
+			}
+			t4.FromSelect(t5)
+			s.Where(sql.In(t1.C(FieldID), t4))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(GroupsLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasFriends applies the HasEdge predicate on the "friends" edge.
+func HasFriends() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(FriendsPrimaryKey[0]).From(sql.Table(FriendsTable)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// "friends" is a symmetric edge; match it in both directions.
+			t.Both(FriendsLabel)
+		},
+	}
+}
+
+// HasFriendsWith applies the HasEdge predicate on the "friends" edge with the given conditions (other predicates).
+func HasFriendsWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Table(Table)
+			t3 := sql.Table(FriendsTable)
+			t4 := sql.Select(t3.C(FriendsPrimaryKey[0])).
+				From(t3).
+				Join(t2).
+				On(t3.C(FriendsPrimaryKey[1]), t2.C(FieldID))
+			t5 := sql.Select().From(t2)
+			for _, p := range preds {
+				p.SQL(t5)
+			}
+			t4.FromSelect(t5)
+			s.Where(sql.In(t1.C(FieldID), t4))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// symmetric edge: the matching friend can be on either end,
+			// so test both the outgoing and the incoming direction.
+			in, out := __.InV(), __.OutV()
+			for _, p := range preds {
+				p.Gremlin(in)
+				p.Gremlin(out)
+			}
+			t.Where(
+				__.Or(
+					__.OutE(FriendsLabel).Where(in),
+					__.InE(FriendsLabel).Where(out),
+				),
+			)
+		},
+	}
+}
+
+// HasFollowers applies the HasEdge predicate on the "followers" edge.
+func HasFollowers() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(FollowersPrimaryKey[1]).From(sql.Table(FollowersTable)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// "followers" is the inverse of "following": traverse incoming edges.
+			t.InE(FollowersInverseLabel).InV()
+		},
+	}
+}
+
+// HasFollowersWith applies the HasEdge predicate on the "followers" edge with the given conditions (other predicates).
+func HasFollowersWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Table(Table)
+			t3 := sql.Table(FollowersTable)
+			t4 := sql.Select(t3.C(FollowersPrimaryKey[1])).
+				From(t3).
+				Join(t2).
+				On(t3.C(FollowersPrimaryKey[0]), t2.C(FieldID))
+			t5 := sql.Select().From(t2)
+			for _, p := range preds {
+				p.SQL(t5)
+			}
+			t4.FromSelect(t5)
+			s.Where(sql.In(t1.C(FieldID), t4))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.OutV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.InE(FollowersInverseLabel).Where(tr).InV()
+		},
+	}
+}
+
+// HasFollowing applies the HasEdge predicate on the "following" edge.
+func HasFollowing() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(FollowingPrimaryKey[0]).From(sql.Table(FollowingTable)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(FollowingLabel).OutV()
+		},
+	}
+}
+
+// HasFollowingWith applies the HasEdge predicate on the "following" edge with the given conditions (other predicates).
+func HasFollowingWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Table(Table)
+			t3 := sql.Table(FollowingTable)
+			t4 := sql.Select(t3.C(FollowingPrimaryKey[0])).
+				From(t3).
+				Join(t2).
+				On(t3.C(FollowingPrimaryKey[1]), t2.C(FieldID))
+			t5 := sql.Select().From(t2)
+			for _, p := range preds {
+				p.SQL(t5)
+			}
+			t4.FromSelect(t5)
+			s.Where(sql.In(t1.C(FieldID), t4))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(FollowingLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasTeam applies the HasEdge predicate on the "team" edge.
+func HasTeam() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(TeamColumn).
+						From(sql.Table(TeamTable)).
+						Where(sql.NotNull(TeamColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(TeamLabel).OutV()
+		},
+	}
+}
+
+// HasTeamWith applies the HasEdge predicate on the "team" edge with the given conditions (other predicates).
+func HasTeamWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(TeamColumn).From(sql.Table(TeamTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(TeamLabel).Where(tr).OutV()
+		},
+	}
+}
+
+// HasSpouse applies the HasEdge predicate on the "spouse" edge.
+func HasSpouse() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(SpouseColumn).
+						From(sql.Table(SpouseTable)).
+						Where(sql.NotNull(SpouseColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// "spouse" is a symmetric edge; match it in both directions.
+			t.Both(SpouseLabel)
+		},
+	}
+}
+
+// HasSpouseWith applies the HasEdge predicate on the "spouse" edge with the given conditions (other predicates).
+func HasSpouseWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(SpouseColumn).From(sql.Table(SpouseTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// symmetric edge: test both the outgoing and the incoming direction.
+			in, out := __.InV(), __.OutV()
+			for _, p := range preds {
+				p.Gremlin(in)
+				p.Gremlin(out)
+			}
+			t.Where(
+				__.Or(
+					__.OutE(SpouseLabel).Where(in),
+					__.InE(SpouseLabel).Where(out),
+				),
+			)
+		},
+	}
+}
+
+// HasChildren applies the HasEdge predicate on the "children" edge.
+func HasChildren() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			s.Where(
+				sql.In(
+					t1.C(FieldID),
+					sql.Select(ChildrenColumn).
+						From(sql.Table(ChildrenTable)).
+						Where(sql.NotNull(ChildrenColumn)),
+				),
+			)
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			// "children" is the inverse of "parent": traverse incoming edges.
+			t.InE(ChildrenInverseLabel).InV()
+		},
+	}
+}
+
+// HasChildrenWith applies the HasEdge predicate on the "children" edge with the given conditions (other predicates).
+func HasChildrenWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(ChildrenColumn).From(sql.Table(ChildrenTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(FieldID), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.OutV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.InE(ChildrenInverseLabel).Where(tr).InV()
+		},
+	}
+}
+
+// HasParent applies the HasEdge predicate on the "parent" edge.
+func HasParent() ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			// the foreign key lives on the user row itself, so a NOT NULL
+			// check is enough — no sub-query needed.
+			s.Where(sql.NotNull(t1.C(ParentColumn)))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			t.OutE(ParentLabel).OutV()
+		},
+	}
+}
+
+// HasParentWith applies the HasEdge predicate on the "parent" edge with the given conditions (other predicates).
+func HasParentWith(preds ...ent.Predicate) ent.Predicate {
+	return ent.Predicate{
+		SQL: func(s *sql.Selector) {
+			t1 := s.Table()
+			t2 := sql.Select(FieldID).From(sql.Table(ParentTable))
+			for _, p := range preds {
+				p.SQL(t2)
+			}
+			s.Where(sql.In(t1.C(ParentColumn), t2))
+		},
+		Gremlin: func(t *dsl.Traversal) {
+			tr := __.InV()
+			for _, p := range preds {
+				p.Gremlin(tr)
+			}
+			t.OutE(ParentLabel).Where(tr).OutV()
+		},
+	}
+}
diff --git a/entc/integration/ent/user_create.go b/entc/integration/ent/user_create.go
new file mode 100644
index 000000000..7d54749c0
--- /dev/null
+++ b/entc/integration/ent/user_create.go
@@ -0,0 +1,747 @@
+// Code generated (@generated) by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"fbc/ent/entc/integration/ent/card"
+	"fbc/ent/entc/integration/ent/file"
+	"fbc/ent/entc/integration/ent/pet"
+	"fbc/ent/entc/integration/ent/user"
+
+	"fbc/ent/dialect"
+	"fbc/ent/dialect/sql"
+
+	"fbc/lib/go/gremlin"
+	"fbc/lib/go/gremlin/graph/dsl"
+	"fbc/lib/go/gremlin/graph/dsl/__"
+	"fbc/lib/go/gremlin/graph/dsl/g"
+	"fbc/lib/go/gremlin/graph/dsl/p"
+)
+
+// UserCreate is the builder for creating a User entity.
+type UserCreate struct {
+	config
+	// scalar fields; a nil pointer means the field was not set.
+	age      *int
+	name     *string
+	last     *string
+	nickname *string
+	phone    *string
+	// edge targets, keyed by entity id (each map is used as a set).
+	card      map[string]struct{}
+	pets      map[string]struct{}
+	files     map[string]struct{}
+	groups    map[string]struct{}
+	friends   map[string]struct{}
+	followers map[string]struct{}
+	following map[string]struct{}
+	team      map[string]struct{}
+	spouse    map[string]struct{}
+	children  map[string]struct{}
+	parent    map[string]struct{}
+}
+
+// SetAge sets the age field.
+func (uc *UserCreate) SetAge(i int) *UserCreate {
+	uc.age = &i
+	return uc
+}
+
+// SetName sets the name field.
+func (uc *UserCreate) SetName(s string) *UserCreate {
+	uc.name = &s
+	return uc
+}
+
+// SetLast sets the last field.
+func (uc *UserCreate) SetLast(s string) *UserCreate {
+	uc.last = &s
+	return uc
+}
+
+// SetNillableLast sets the last field if the given value is not nil.
+func (uc *UserCreate) SetNillableLast(s *string) *UserCreate {
+	if s != nil {
+		uc.SetLast(*s)
+	}
+	return uc
+}
+
+// SetNickname sets the nickname field.
+func (uc *UserCreate) SetNickname(s string) *UserCreate {
+	uc.nickname = &s
+	return uc
+}
+
+// SetNillableNickname sets the nickname field if the given value is not nil.
+func (uc *UserCreate) SetNillableNickname(s *string) *UserCreate {
+	if s != nil {
+		uc.SetNickname(*s)
+	}
+	return uc
+}
+
+// SetPhone sets the phone field.
+func (uc *UserCreate) SetPhone(s string) *UserCreate {
+	uc.phone = &s
+	return uc
+}
+
+// SetNillablePhone sets the phone field if the given value is not nil.
+func (uc *UserCreate) SetNillablePhone(s *string) *UserCreate {
+	if s != nil {
+		uc.SetPhone(*s)
+	}
+	return uc
+}
+
+// SetCardID sets the card edge to Card by id.
+// The id is accumulated in a set; "card" is a unique edge, so more than
+// one distinct id makes Save fail its unique-edge check.
+func (uc *UserCreate) SetCardID(id string) *UserCreate {
+	if uc.card == nil {
+		uc.card = make(map[string]struct{})
+	}
+	uc.card[id] = struct{}{}
+	return uc
+}
+
+// SetNillableCardID sets the card edge to Card by id if the given value is not nil.
+func (uc *UserCreate) SetNillableCardID(id *string) *UserCreate {
+	if id != nil {
+		uc = uc.SetCardID(*id)
+	}
+	return uc
+}
+
+// SetCard sets the card edge to Card.
+func (uc *UserCreate) SetCard(c *Card) *UserCreate {
+	return uc.SetCardID(c.ID)
+}
+
+// AddPetIDs adds the pets edge to Pet by ids.
+func (uc *UserCreate) AddPetIDs(ids ...string) *UserCreate {
+	if uc.pets == nil {
+		uc.pets = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.pets[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddPets adds the pets edges to Pet.
+func (uc *UserCreate) AddPets(p ...*Pet) *UserCreate {
+	ids := make([]string, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return uc.AddPetIDs(ids...)
+}
+
+// AddFileIDs adds the files edge to File by ids.
+func (uc *UserCreate) AddFileIDs(ids ...string) *UserCreate {
+	if uc.files == nil {
+		uc.files = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.files[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddFiles adds the files edges to File.
+func (uc *UserCreate) AddFiles(f ...*File) *UserCreate {
+	ids := make([]string, len(f))
+	for i := range f {
+		ids[i] = f[i].ID
+	}
+	return uc.AddFileIDs(ids...)
+}
+
+// AddGroupIDs adds the groups edge to Group by ids.
+// The target set is lazily initialized; duplicate ids collapse into one entry.
+func (uc *UserCreate) AddGroupIDs(ids ...string) *UserCreate {
+	if uc.groups == nil {
+		uc.groups = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.groups[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddGroups adds the groups edges to Group.
+func (uc *UserCreate) AddGroups(g ...*Group) *UserCreate {
+	ids := make([]string, len(g))
+	for i := range g {
+		ids[i] = g[i].ID
+	}
+	return uc.AddGroupIDs(ids...)
+}
+
+// AddFriendIDs adds the friends edge to User by ids.
+func (uc *UserCreate) AddFriendIDs(ids ...string) *UserCreate {
+	if uc.friends == nil {
+		uc.friends = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.friends[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddFriends adds the friends edges to User.
+func (uc *UserCreate) AddFriends(u ...*User) *UserCreate {
+	ids := make([]string, len(u))
+	for i := range u {
+		ids[i] = u[i].ID
+	}
+	return uc.AddFriendIDs(ids...)
+}
+
+// AddFollowerIDs adds the followers edge to User by ids.
+func (uc *UserCreate) AddFollowerIDs(ids ...string) *UserCreate {
+	if uc.followers == nil {
+		uc.followers = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.followers[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddFollowers adds the followers edges to User.
+func (uc *UserCreate) AddFollowers(u ...*User) *UserCreate {
+	ids := make([]string, len(u))
+	for i := range u {
+		ids[i] = u[i].ID
+	}
+	return uc.AddFollowerIDs(ids...)
+}
+
+// AddFollowingIDs adds the following edge to User by ids.
+func (uc *UserCreate) AddFollowingIDs(ids ...string) *UserCreate {
+	if uc.following == nil {
+		uc.following = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.following[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddFollowing adds the following edges to User.
+func (uc *UserCreate) AddFollowing(u ...*User) *UserCreate {
+	ids := make([]string, len(u))
+	for i := range u {
+		ids[i] = u[i].ID
+	}
+	return uc.AddFollowingIDs(ids...)
+}
+
+// SetTeamID sets the team edge to Pet by id.
+// The id is accumulated in a set; "team" is a unique edge, so more than
+// one distinct id makes Save fail its unique-edge check.
+func (uc *UserCreate) SetTeamID(id string) *UserCreate {
+	if uc.team == nil {
+		uc.team = make(map[string]struct{})
+	}
+	uc.team[id] = struct{}{}
+	return uc
+}
+
+// SetNillableTeamID sets the team edge to Pet by id if the given value is not nil.
+func (uc *UserCreate) SetNillableTeamID(id *string) *UserCreate {
+	if id != nil {
+		uc = uc.SetTeamID(*id)
+	}
+	return uc
+}
+
+// SetTeam sets the team edge to Pet.
+func (uc *UserCreate) SetTeam(p *Pet) *UserCreate {
+	return uc.SetTeamID(p.ID)
+}
+
+// SetSpouseID sets the spouse edge to User by id.
+func (uc *UserCreate) SetSpouseID(id string) *UserCreate {
+	if uc.spouse == nil {
+		uc.spouse = make(map[string]struct{})
+	}
+	uc.spouse[id] = struct{}{}
+	return uc
+}
+
+// SetNillableSpouseID sets the spouse edge to User by id if the given value is not nil.
+func (uc *UserCreate) SetNillableSpouseID(id *string) *UserCreate {
+	if id != nil {
+		uc = uc.SetSpouseID(*id)
+	}
+	return uc
+}
+
+// SetSpouse sets the spouse edge to User.
+func (uc *UserCreate) SetSpouse(u *User) *UserCreate {
+	return uc.SetSpouseID(u.ID)
+}
+
+// AddChildIDs adds the children edge to User by ids.
+func (uc *UserCreate) AddChildIDs(ids ...string) *UserCreate {
+	if uc.children == nil {
+		uc.children = make(map[string]struct{})
+	}
+	for i := range ids {
+		uc.children[ids[i]] = struct{}{}
+	}
+	return uc
+}
+
+// AddChildren adds the children edges to User.
+func (uc *UserCreate) AddChildren(u ...*User) *UserCreate {
+	ids := make([]string, len(u))
+	for i := range u {
+		ids[i] = u[i].ID
+	}
+	return uc.AddChildIDs(ids...)
+}
+
+// SetParentID sets the parent edge to User by id.
+func (uc *UserCreate) SetParentID(id string) *UserCreate {
+	if uc.parent == nil {
+		uc.parent = make(map[string]struct{})
+	}
+	uc.parent[id] = struct{}{}
+	return uc
+}
+
+// SetNillableParentID sets the parent edge to User by id if the given value is not nil.
+func (uc *UserCreate) SetNillableParentID(id *string) *UserCreate {
+	if id != nil {
+		uc = uc.SetParentID(*id)
+	}
+	return uc
+}
+
+// SetParent sets the parent edge to User.
+func (uc *UserCreate) SetParent(u *User) *UserCreate {
+	return uc.SetParentID(u.ID)
+}
+
+// Save creates the User in the database.
+// It validates required fields and unique-edge cardinality up front,
+// fills defaults, and then dispatches to the dialect-specific writer.
+func (uc *UserCreate) Save(ctx context.Context) (*User, error) {
+	if uc.age == nil {
+		return nil, errors.New("ent: missing required field \"age\"")
+	}
+	if uc.name == nil {
+		return nil, errors.New("ent: missing required field \"name\"")
+	}
+	if uc.last == nil {
+		// apply the schema default for "last".
+		v := user.DefaultLast
+		uc.last = &v
+	}
+	// unique edges may reference at most one entity.
+	if len(uc.card) > 1 {
+		return nil, errors.New("ent: multiple assignments on a unique edge \"card\"")
+	}
+	if len(uc.team) > 1 {
+		return nil, errors.New("ent: multiple assignments on a unique edge \"team\"")
+	}
+	if len(uc.spouse) > 1 {
+		return nil, errors.New("ent: multiple assignments on a unique edge \"spouse\"")
+	}
+	if len(uc.parent) > 1 {
+		return nil, errors.New("ent: multiple assignments on a unique edge \"parent\"")
+	}
+	switch uc.driver.Dialect() {
+	case dialect.MySQL, dialect.SQLite:
+		return uc.sqlSave(ctx)
+	case dialect.Neptune:
+		return uc.gremlinSave(ctx)
+	default:
+		return nil, errors.New("ent: unsupported dialect")
+	}
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (uc *UserCreate) SaveX(ctx context.Context) *User {
+	v, err := uc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// sqlSave inserts the user row inside a transaction, then wires up all
+// requested edges (FK updates and join-table inserts). Any failure rolls
+// the transaction back.
+func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) {
+	var (
+		res sql.Result
+		u   = &User{config: uc.config}
+	)
+	tx, err := uc.driver.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	builder := sql.Insert(user.Table).Default(uc.driver.Dialect())
+	// only set columns whose fields were provided, mirroring them onto
+	// the returned entity.
+	if uc.age != nil {
+		builder.Set(user.FieldAge, *uc.age)
+		u.Age = *uc.age
+	}
+	if uc.name != nil {
+		builder.Set(user.FieldName, *uc.name)
+		u.Name = *uc.name
+	}
+	if uc.last != nil {
+		builder.Set(user.FieldLast, *uc.last)
+		u.Last = *uc.last
+	}
+	if uc.nickname != nil {
+		builder.Set(user.FieldNickname, *uc.nickname)
+		u.Nickname = *uc.nickname
+	}
+	if uc.phone != nil {
+		builder.Set(user.FieldPhone, *uc.phone)
+		u.Phone = *uc.phone
+	}
+	query, args := builder.Query()
+	if err := tx.Exec(ctx, query, args, &res); err != nil {
+		return nil, rollback(tx, err)
+	}
+	id, err := res.LastInsertId()
+	if err != nil {
+		return nil, rollback(tx, err)
+	}
+	u.ID = strconv.FormatInt(id, 10)
+	if len(uc.card) > 0 {
+		eid, err := strconv.Atoi(keys(uc.card)[0])
+		if err != nil {
+			return nil, err
+		}
+		// claim the card row only if it is not already owned (IsNull guard).
+		query, args := sql.Update(user.CardTable).
+			Set(user.CardColumn, id).
+			Where(sql.EQ(card.FieldID, eid).And().IsNull(user.CardColumn)).
+			Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return nil, rollback(tx, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return nil, rollback(tx, err)
+		}
+		if int(affected) < len(uc.card) {
+			return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"card\" %v already connected to a different \"User\"", keys(uc.card))})
+		}
+	}
+	if len(uc.pets) > 0 {
+		p := sql.P()
+		for eid := range uc.pets {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+			p.Or().EQ(pet.FieldID, eid)
+		}
+		query, args := sql.Update(user.PetsTable).
+			Set(user.PetsColumn, id).
+			Where(sql.And(p, sql.IsNull(user.PetsColumn))).
+			Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return nil, rollback(tx, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return nil, rollback(tx, err)
+		}
+		if int(affected) < len(uc.pets) {
+			return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"pets\" %v already connected to a different \"User\"", keys(uc.pets))})
+		}
+	}
+	if len(uc.files) > 0 {
+		p := sql.P()
+		for eid := range uc.files {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+			p.Or().EQ(file.FieldID, eid)
+		}
+		query, args := sql.Update(user.FilesTable).
+			Set(user.FilesColumn, id).
+			Where(sql.And(p, sql.IsNull(user.FilesColumn))).
+			Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return nil, rollback(tx, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return nil, rollback(tx, err)
+		}
+		if int(affected) < len(uc.files) {
+			return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"User\"", keys(uc.files))})
+		}
+	}
+	if len(uc.groups) > 0 {
+		for eid := range uc.groups {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+
+			query, args := sql.Insert(user.GroupsTable).
+				Columns(user.GroupsPrimaryKey[0], user.GroupsPrimaryKey[1]).
+				Values(id, eid).
+				Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+		}
+	}
+	if len(uc.friends) > 0 {
+		for eid := range uc.friends {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+
+			// "friends" is a symmetric M2M edge: insert both directions.
+			query, args := sql.Insert(user.FriendsTable).
+				Columns(user.FriendsPrimaryKey[0], user.FriendsPrimaryKey[1]).
+				Values(id, eid).
+				Values(eid, id).
+				Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+		}
+	}
+	if len(uc.followers) > 0 {
+		for eid := range uc.followers {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+
+			// column order is reversed because "followers" is the inverse
+			// of the "following" edge.
+			query, args := sql.Insert(user.FollowersTable).
+				Columns(user.FollowersPrimaryKey[1], user.FollowersPrimaryKey[0]).
+				Values(id, eid).
+				Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+		}
+	}
+	if len(uc.following) > 0 {
+		for eid := range uc.following {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+
+			query, args := sql.Insert(user.FollowingTable).
+				Columns(user.FollowingPrimaryKey[0], user.FollowingPrimaryKey[1]).
+				Values(id, eid).
+				Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+		}
+	}
+	if len(uc.team) > 0 {
+		eid, err := strconv.Atoi(keys(uc.team)[0])
+		if err != nil {
+			return nil, err
+		}
+		query, args := sql.Update(user.TeamTable).
+			Set(user.TeamColumn, id).
+			Where(sql.EQ(pet.FieldID, eid).And().IsNull(user.TeamColumn)).
+			Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return nil, rollback(tx, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return nil, rollback(tx, err)
+		}
+		if int(affected) < len(uc.team) {
+			return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"User\"", keys(uc.team))})
+		}
+	}
+	if len(uc.spouse) > 0 {
+		for eid := range uc.spouse {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+			// symmetric unique edge: point this user at the spouse, then the
+			// spouse back at this user (guarded by IsNull so a spouse already
+			// taken is detected).
+			query, args := sql.Update(user.SpouseTable).
+				Set(user.SpouseColumn, eid).
+				Where(sql.EQ(user.FieldID, id)).Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+			query, args = sql.Update(user.SpouseTable).
+				Set(user.SpouseColumn, id).
+				Where(sql.EQ(user.FieldID, eid).And().IsNull(user.SpouseColumn)).Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+			// NOTE(review): only the second update's RowsAffected is checked;
+			// the first update's row count is assumed — confirm with the generator.
+			affected, err := res.RowsAffected()
+			if err != nil {
+				return nil, rollback(tx, err)
+			}
+			if int(affected) < len(uc.spouse) {
+				return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("\"spouse\" (%v) already connected to a different \"User\"", eid)})
+			}
+		}
+	}
+	if len(uc.children) > 0 {
+		p := sql.P()
+		for eid := range uc.children {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+			p.Or().EQ(user.FieldID, eid)
+		}
+		query, args := sql.Update(user.ChildrenTable).
+			Set(user.ChildrenColumn, id).
+			Where(sql.And(p, sql.IsNull(user.ChildrenColumn))).
+			Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return nil, rollback(tx, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return nil, rollback(tx, err)
+		}
+		if int(affected) < len(uc.children) {
+			return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"children\" %v already connected to a different \"User\"", keys(uc.children))})
+		}
+	}
+	if len(uc.parent) > 0 {
+		for eid := range uc.parent {
+			eid, err := strconv.Atoi(eid)
+			if err != nil {
+				return nil, err
+			}
+			query, args := sql.Update(user.ParentTable).
+				Set(user.ParentColumn, eid).
+				Where(sql.EQ(user.FieldID, id)).
+				Query()
+			if err := tx.Exec(ctx, query, args, &res); err != nil {
+				return nil, rollback(tx, err)
+			}
+		}
+	}
+	if err := tx.Commit(); err != nil {
+		return nil, err
+	}
+	return u, nil
+}
+
+// gremlinSave executes the generated traversal and decodes the created
+// vertex (or a constraint-violation constant) from the response.
+func (uc *UserCreate) gremlinSave(ctx context.Context) (*User, error) {
+	res := &gremlin.Response{}
+	query, bindings := uc.gremlin().Query()
+	if err := uc.driver.Exec(ctx, query, bindings, res); err != nil {
+		return nil, err
+	}
+	if err, ok := isConstantError(res); ok {
+		return nil, err
+	}
+	u := &User{config: uc.config}
+	if err := u.FromResponse(res); err != nil {
+		return nil, err
+	}
+	return u, nil
+}
+
+// gremlin builds the creation traversal: an AddV with the set properties
+// and edges, wrapped in a chain of Coalesce steps so the first violated
+// uniqueness constraint short-circuits with its error constant.
+func (uc *UserCreate) gremlin() *dsl.Traversal {
+	type constraint struct {
+		pred *dsl.Traversal // constraint predicate.
+		test *dsl.Traversal // test matches and its constant.
+	}
+	constraints := make([]*constraint, 0, 8)
+	v := g.AddV(user.Label)
+	if uc.age != nil {
+		v.Property(dsl.Single, user.FieldAge, *uc.age)
+	}
+	if uc.name != nil {
+		v.Property(dsl.Single, user.FieldName, *uc.name)
+	}
+	if uc.last != nil {
+		v.Property(dsl.Single, user.FieldLast, *uc.last)
+	}
+	if uc.nickname != nil {
+		// unique field: fail if another vertex already has this nickname.
+		constraints = append(constraints, &constraint{
+			pred: g.V().Has(user.Label, user.FieldNickname, *uc.nickname).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldNickname, *uc.nickname)),
+		})
+		v.Property(dsl.Single, user.FieldNickname, *uc.nickname)
+	}
+	if uc.phone != nil {
+		constraints = append(constraints, &constraint{
+			pred: g.V().Has(user.Label, user.FieldPhone, *uc.phone).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldPhone, *uc.phone)),
+		})
+		v.Property(dsl.Single, user.FieldPhone, *uc.phone)
+	}
+	for id := range uc.card {
+		v.AddE(user.CardLabel).To(g.V(id)).OutV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.CardLabel).InV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.CardLabel, id)),
+		})
+	}
+	for id := range uc.pets {
+		v.AddE(user.PetsLabel).To(g.V(id)).OutV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.PetsLabel).InV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.PetsLabel, id)),
+		})
+	}
+	for id := range uc.files {
+		v.AddE(user.FilesLabel).To(g.V(id)).OutV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.FilesLabel).InV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.FilesLabel, id)),
+		})
+	}
+	for id := range uc.groups {
+		v.AddE(user.GroupsLabel).To(g.V(id)).OutV()
+	}
+	for id := range uc.friends {
+		v.AddE(user.FriendsLabel).To(g.V(id)).OutV()
+	}
+	for id := range uc.followers {
+		// followers are modeled as incoming "following" edges.
+		v.AddE(user.FollowingLabel).From(g.V(id)).InV()
+	}
+	for id := range uc.following {
+		v.AddE(user.FollowingLabel).To(g.V(id)).OutV()
+	}
+	for id := range uc.team {
+		v.AddE(user.TeamLabel).To(g.V(id)).OutV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.TeamLabel).InV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.TeamLabel, id)),
+		})
+	}
+	for id := range uc.spouse {
+		v.AddE(user.SpouseLabel).To(g.V(id)).OutV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.SpouseLabel).InV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.SpouseLabel, id)),
+		})
+	}
+	for id := range uc.children {
+		// children are modeled as incoming "parent" edges; each child may
+		// have at most one parent, hence the uniqueness constraint.
+		v.AddE(user.ParentLabel).From(g.V(id)).InV()
+		constraints = append(constraints, &constraint{
+			pred: g.E().HasLabel(user.ParentLabel).OutV().HasID(id).Count(),
+			test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.ParentLabel, id)),
+		})
+	}
+	for id := range uc.parent {
+		v.AddE(user.ParentLabel).To(g.V(id)).OutV()
+	}
+	if len(constraints) == 0 {
+		return v.ValueMap(true)
+	}
+	// chain the constraints: each Coalesce either yields the error constant
+	// or falls through to the next check / the creation traversal.
+	tr := constraints[0].pred.Coalesce(constraints[0].test, v.ValueMap(true))
+	for _, cr := range constraints[1:] {
+		tr = cr.pred.Coalesce(cr.test, tr)
+	}
+	return tr
+}
diff --git a/entc/integration/ent/user_delete.go b/entc/integration/ent/user_delete.go
new file mode 100644
index 000000000..97afc7911
--- /dev/null
+++ b/entc/integration/ent/user_delete.go
@@ -0,0 +1,88 @@
+// Code generated (@generated) by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+
+	"fbc/ent/entc/integration/ent/user"
+
+	"fbc/ent"
+	"fbc/ent/dialect"
+	"fbc/ent/dialect/sql"
+
+	"fbc/lib/go/gremlin"
+	"fbc/lib/go/gremlin/graph/dsl"
+	"fbc/lib/go/gremlin/graph/dsl/g"
+)
+
+// UserDelete is the builder for deleting a User entity.
+type UserDelete struct {
+	config
+	predicates []ent.Predicate
+}
+
+// Where adds a new predicate for the builder.
+func (ud *UserDelete) Where(ps ...ent.Predicate) *UserDelete {
+	ud.predicates = append(ud.predicates, ps...)
+	return ud
+}
+
+// Exec executes the deletion query.
+func (ud *UserDelete) Exec(ctx context.Context) error {
+	switch ud.driver.Dialect() {
+	case dialect.MySQL, dialect.SQLite:
+		return ud.sqlExec(ctx)
+	case dialect.Neptune:
+		return ud.gremlinExec(ctx)
+	default:
+		return errors.New("ent: unsupported dialect")
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ud *UserDelete) ExecX(ctx context.Context) { + if err := ud.Exec(ctx); err != nil { + panic(err) + } +} + +func (ud *UserDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(user.Table)) + for _, p := range ud.predicates { + p.SQL(selector) + } + query, args := sql.Delete(user.Table).FromSelect(selector).Query() + return ud.driver.Exec(ctx, query, args, &res) +} + +func (ud *UserDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := ud.gremlin().Query() + return ud.driver.Exec(ctx, query, bindings, res) +} + +func (ud *UserDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(user.Label) + for _, p := range ud.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + ud *UserDelete +} + +// Exec executes the deletion query. +func (udo *UserDeleteOne) Exec(ctx context.Context) error { + return udo.ud.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (udo *UserDeleteOne) ExecX(ctx context.Context) { + udo.ud.ExecX(ctx) +} diff --git a/entc/integration/ent/user_query.go b/entc/integration/ent/user_query.go new file mode 100644 index 000000000..327a6a73c --- /dev/null +++ b/entc/integration/ent/user_query.go @@ -0,0 +1,831 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/file" + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// UserQuery is the builder for querying User entities. 
+type UserQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (uq *UserQuery) Where(ps ...ent.Predicate) *UserQuery { + uq.predicates = append(uq.predicates, ps...) + return uq +} + +// Limit adds a limit step to the query. +func (uq *UserQuery) Limit(limit int) *UserQuery { + uq.limit = &limit + return uq +} + +// Order adds an order step to the query. +func (uq *UserQuery) Order(o ...Order) *UserQuery { + uq.order = append(uq.order, o...) + return uq +} + +// QueryCard chains the current query on the card edge. +func (uq *UserQuery) QueryCard() *CardQuery { + query := &CardQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(card.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(user.CardColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.CardLabel).InV() + } + return query +} + +// QueryPets chains the current query on the pets edge. +func (uq *UserQuery) QueryPets() *PetQuery { + query := &PetQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(pet.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(user.PetsColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.PetsLabel).InV() + } + return query +} + +// QueryFiles chains the current query on the files edge. 
+func (uq *UserQuery) QueryFiles() *FileQuery { + query := &FileQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(file.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(user.FilesColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.FilesLabel).InV() + } + return query +} + +// QueryGroups chains the current query on the groups edge. +func (uq *UserQuery) QueryGroups() *GroupQuery { + query := &GroupQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(group.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + t3 := sql.Table(user.GroupsTable) + t4 := sql.Select(t3.C(user.GroupsPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.GroupsPrimaryKey[0]), t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(group.FieldID), t4.C(user.GroupsPrimaryKey[1])) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.GroupsLabel).InV() + } + return query +} + +// QueryFriends chains the current query on the friends edge. +func (uq *UserQuery) QueryFriends() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + t3 := sql.Table(user.FriendsTable) + t4 := sql.Select(t3.C(user.FriendsPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.FriendsPrimaryKey[0]), t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FriendsPrimaryKey[1])) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.Both(user.FriendsLabel) + } + return query +} + +// QueryFollowers chains the current query on the followers edge. 
+func (uq *UserQuery) QueryFollowers() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + t3 := sql.Table(user.FollowersTable) + t4 := sql.Select(t3.C(user.FollowersPrimaryKey[0])). + From(t3). + Join(t2). + On(t3.C(user.FollowersPrimaryKey[1]), t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FollowersPrimaryKey[0])) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.InE(user.FollowingLabel).OutV() + } + return query +} + +// QueryFollowing chains the current query on the following edge. +func (uq *UserQuery) QueryFollowing() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + t3 := sql.Table(user.FollowingTable) + t4 := sql.Select(t3.C(user.FollowingPrimaryKey[1])). + From(t3). + Join(t2). + On(t3.C(user.FollowingPrimaryKey[0]), t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t4). + On(t1.C(user.FieldID), t4.C(user.FollowingPrimaryKey[1])) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.FollowingLabel).InV() + } + return query +} + +// QueryTeam chains the current query on the team edge. +func (uq *UserQuery) QueryTeam() *PetQuery { + query := &PetQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(pet.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). 
+ On(t1.C(user.TeamColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.TeamLabel).InV() + } + return query +} + +// QuerySpouse chains the current query on the spouse edge. +func (uq *UserQuery) QuerySpouse() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(user.SpouseColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.Both(user.SpouseLabel) + } + return query +} + +// QueryChildren chains the current query on the children edge. +func (uq *UserQuery) QueryChildren() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.FieldID)) + query.sql = sql.Select(). + From(t1). + Join(t2). + On(t1.C(user.ChildrenColumn), t2.C(user.FieldID)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.InE(user.ParentLabel).OutV() + } + return query +} + +// QueryParent chains the current query on the parent edge. +func (uq *UserQuery) QueryParent() *UserQuery { + query := &UserQuery{config: uq.config} + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + t1 := sql.Table(user.Table) + t2 := uq.sqlQuery() + t2.Select(t2.C(user.ParentColumn)) + query.sql = sql.Select(t1.Columns(user.Columns...)...). + From(t1). + Join(t2). + On(t1.C(user.FieldID), t2.C(user.ParentColumn)) + case dialect.Neptune: + gremlin := uq.gremlinQuery() + query.gremlin = gremlin.OutE(user.ParentLabel).InV() + } + return query +} + +// Get returns a User entity by its id. 
+func (uq *UserQuery) Get(ctx context.Context, id string) (*User, error) { + return uq.Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (uq *UserQuery) GetX(ctx context.Context, id string) *User { + u, err := uq.Get(ctx, id) + if err != nil { + panic(err) + } + return u +} + +// First returns the first User entity in the query. Returns *ErrNotFound when no user was found. +func (uq *UserQuery) First(ctx context.Context) (*User, error) { + us, err := uq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(us) == 0 { + return nil, &ErrNotFound{user.Label} + } + return us[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (uq *UserQuery) FirstX(ctx context.Context) *User { + u, err := uq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return u +} + +// FirstID returns the first User id in the query. Returns *ErrNotFound when no id was found. +func (uq *UserQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = uq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{user.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (uq *UserQuery) FirstXID(ctx context.Context) string { + id, err := uq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only User entity in the query, returns an error if not exactly one entity was returned. +func (uq *UserQuery) Only(ctx context.Context) (*User, error) { + us, err := uq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(us) { + case 1: + return us[0], nil + case 0: + return nil, &ErrNotFound{user.Label} + default: + return nil, &ErrNotSingular{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (uq *UserQuery) OnlyX(ctx context.Context) *User { + u, err := uq.Only(ctx) + if err != nil { + panic(err) + } + return u +} + +// OnlyID returns the only User id in the query, returns an error if not exactly one id was returned. +func (uq *UserQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = uq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{user.Label} + default: + err = &ErrNotSingular{user.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (uq *UserQuery) OnlyXID(ctx context.Context) string { + id, err := uq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return uq.sqlAll(ctx) + case dialect.Neptune: + return uq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (uq *UserQuery) AllX(ctx context.Context) []*User { + us, err := uq.All(ctx) + if err != nil { + panic(err) + } + return us +} + +// IDs executes the query and returns a list of User ids. +func (uq *UserQuery) IDs(ctx context.Context) ([]string, error) { + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return uq.sqlIDs(ctx) + case dialect.Neptune: + return uq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (uq *UserQuery) IDsX(ctx context.Context) []string { + ids, err := uq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (uq *UserQuery) Count(ctx context.Context) (int, error) { + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return uq.sqlCount(ctx) + case dialect.Neptune: + return uq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (uq *UserQuery) CountX(ctx context.Context) int { + count, err := uq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { + switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return uq.sqlExist(ctx) + case dialect.Neptune: + return uq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (uq *UserQuery) ExistX(ctx context.Context) bool { + exist, err := uq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Age int `json:"age,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldAge). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + group := &UserGroupBy{config: uq.config} + group.fields = append([]string{field}, fields...) 
+ switch uq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = uq.sqlQuery() + case dialect.Neptune: + group.gremlin = uq.gremlinQuery() + } + return group +} + +func (uq *UserQuery) sqlAll(ctx context.Context) ([]*User, error) { + rows := &sql.Rows{} + selector := uq.sqlQuery() + if unique := uq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := uq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var us Users + if err := us.FromRows(rows); err != nil { + return nil, err + } + us.config(uq.config) + return us, nil +} + +func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := uq.sqlQuery() + unique := []string{user.FieldID} + if len(uq.unique) > 0 { + unique = uq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := uq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (uq *UserQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := uq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (uq *UserQuery) sqlIDs(ctx context.Context) ([]string, error) { + vs, err := uq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (uq *UserQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(user.Table) + selector := sql.Select(t1.Columns(user.Columns...)...).From(t1) + if uq.sql != nil { + selector = uq.sql + selector.Select(selector.Columns(user.Columns...)...) 
+ } + for _, p := range uq.predicates { + p.SQL(selector) + } + for _, p := range uq.order { + p.SQL(selector) + } + if limit := uq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (uq *UserQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := uq.gremlinQuery().Query() + if err := uq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (uq *UserQuery) gremlinAll(ctx context.Context) ([]*User, error) { + res := &gremlin.Response{} + query, bindings := uq.gremlinQuery().ValueMap(true).Query() + if err := uq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var us Users + if err := us.FromResponse(res); err != nil { + return nil, err + } + us.config(uq.config) + return us, nil +} + +func (uq *UserQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := uq.gremlinQuery().Count().Query() + if err := uq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (uq *UserQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := uq.gremlinQuery().HasNext().Query() + if err := uq.driver.Exec(ctx, query, bindings, res); err != nil { + return false, err + } + return res.ReadBool() +} + +func (uq *UserQuery) gremlinQuery() *dsl.Traversal { + v := g.V().HasLabel(user.Label) + if uq.gremlin != nil { + v = uq.gremlin.Clone() + } + for _, p := range uq.predicates { + p.Gremlin(v) + } + if len(uq.order) > 0 { + v.Order() + for _, p := range uq.order { + p.Gremlin(v) + } + } + if limit := uq.limit; limit != nil { + v.Limit(*limit) + } + if unique := uq.unique; len(unique) == 0 { + 
v.Dedup() + } + return v +} + +// UserQuery is the builder for group-by User entities. +type UserGroupBy struct { + config + fields []string + fns []Aggregate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...Aggregate) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb +} + +// Scan applies the group-by query and scan the result into the given value. +func (ugb *UserGroupBy) Scan(ctx context.Context, v interface{}) error { + switch ugb.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return ugb.sqlScan(ctx, v) + case dialect.Neptune: + return ugb.gremlinScan(ctx, v) + default: + return errors.New("ugb: unsupported dialect") + } +} + +// ScanX is like Scan, but panics if an error occurs. +func (ugb *UserGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := ugb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (ugb *UserGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(ugb.fields) > 1 { + return nil, errors.New("ent: UserGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := ugb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ugb *UserGroupBy) StringsX(ctx context.Context) []string { + v, err := ugb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. 
+func (ugb *UserGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(ugb.fields) > 1 { + return nil, errors.New("ent: UserGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := ugb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (ugb *UserGroupBy) IntsX(ctx context.Context) []int { + v, err := ugb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (ugb *UserGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(ugb.fields) > 1 { + return nil, errors.New("ent: UserGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := ugb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ugb *UserGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := ugb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (ugb *UserGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(ugb.fields) > 1 { + return nil, errors.New("ent: UserGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := ugb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (ugb *UserGroupBy) BoolsX(ctx context.Context) []bool { + v, err := ugb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (ugb *UserGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := ugb.sqlQuery().Query() + if err := ugb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (ugb *UserGroupBy) sqlQuery() *sql.Selector { + selector := ugb.sql + columns := make([]string, 0, len(ugb.fields)+len(ugb.fns)) + columns = append(columns, ugb.fields...) + for _, fn := range ugb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(ugb.fields...) +} + +func (ugb *UserGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := ugb.gremlinQuery().Query() + if err := ugb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(ugb.fields)+len(ugb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (ugb *UserGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range ugb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range ugb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return ugb.gremlin.Group(). + By(__.Values(ugb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/ent/user_update.go b/entc/integration/ent/user_update.go new file mode 100644 index 000000000..a767519c0 --- /dev/null +++ b/entc/integration/ent/user_update.go @@ -0,0 +1,2436 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "strconv" + + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/file" + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + age *int + name *string + last *string + nickname *string + phone *string + card map[string]struct{} + pets map[string]struct{} + files map[string]struct{} + groups map[string]struct{} + friends map[string]struct{} + followers map[string]struct{} + following map[string]struct{} + team map[string]struct{} + spouse map[string]struct{} + children map[string]struct{} + parent map[string]struct{} + clearedCard bool + removedPets map[string]struct{} + removedFiles map[string]struct{} + removedGroups map[string]struct{} + removedFriends map[string]struct{} + removedFollowers map[string]struct{} + removedFollowing map[string]struct{} + clearedTeam bool + clearedSpouse bool + removedChildren map[string]struct{} + clearedParent bool + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (uu *UserUpdate) Where(ps ...ent.Predicate) *UserUpdate { + uu.predicates = append(uu.predicates, ps...) + return uu +} + +// SetAge sets the age field. +func (uu *UserUpdate) SetAge(i int) *UserUpdate { + uu.age = &i + return uu +} + +// SetName sets the name field. +func (uu *UserUpdate) SetName(s string) *UserUpdate { + uu.name = &s + return uu +} + +// SetLast sets the last field. +func (uu *UserUpdate) SetLast(s string) *UserUpdate { + uu.last = &s + return uu +} + +// SetNillableLast sets the last field if the given value is not nil. 
+func (uu *UserUpdate) SetNillableLast(s *string) *UserUpdate { + if s != nil { + uu.SetLast(*s) + } + return uu +} + +// SetNickname sets the nickname field. +func (uu *UserUpdate) SetNickname(s string) *UserUpdate { + uu.nickname = &s + return uu +} + +// SetNillableNickname sets the nickname field if the given value is not nil. +func (uu *UserUpdate) SetNillableNickname(s *string) *UserUpdate { + if s != nil { + uu.SetNickname(*s) + } + return uu +} + +// SetPhone sets the phone field. +func (uu *UserUpdate) SetPhone(s string) *UserUpdate { + uu.phone = &s + return uu +} + +// SetNillablePhone sets the phone field if the given value is not nil. +func (uu *UserUpdate) SetNillablePhone(s *string) *UserUpdate { + if s != nil { + uu.SetPhone(*s) + } + return uu +} + +// SetCardID sets the card edge to Card by id. +func (uu *UserUpdate) SetCardID(id string) *UserUpdate { + if uu.card == nil { + uu.card = make(map[string]struct{}) + } + uu.card[id] = struct{}{} + return uu +} + +// SetNillableCardID sets the card edge to Card by id if the given value is not nil. +func (uu *UserUpdate) SetNillableCardID(id *string) *UserUpdate { + if id != nil { + uu = uu.SetCardID(*id) + } + return uu +} + +// SetCard sets the card edge to Card. +func (uu *UserUpdate) SetCard(c *Card) *UserUpdate { + return uu.SetCardID(c.ID) +} + +// AddPetIDs adds the pets edge to Pet by ids. +func (uu *UserUpdate) AddPetIDs(ids ...string) *UserUpdate { + if uu.pets == nil { + uu.pets = make(map[string]struct{}) + } + for i := range ids { + uu.pets[ids[i]] = struct{}{} + } + return uu +} + +// AddPets adds the pets edges to Pet. +func (uu *UserUpdate) AddPets(p ...*Pet) *UserUpdate { + ids := make([]string, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uu.AddPetIDs(ids...) +} + +// AddFileIDs adds the files edge to File by ids. 
+func (uu *UserUpdate) AddFileIDs(ids ...string) *UserUpdate { + if uu.files == nil { + uu.files = make(map[string]struct{}) + } + for i := range ids { + uu.files[ids[i]] = struct{}{} + } + return uu +} + +// AddFiles adds the files edges to File. +func (uu *UserUpdate) AddFiles(f ...*File) *UserUpdate { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return uu.AddFileIDs(ids...) +} + +// AddGroupIDs adds the groups edge to Group by ids. +func (uu *UserUpdate) AddGroupIDs(ids ...string) *UserUpdate { + if uu.groups == nil { + uu.groups = make(map[string]struct{}) + } + for i := range ids { + uu.groups[ids[i]] = struct{}{} + } + return uu +} + +// AddGroups adds the groups edges to Group. +func (uu *UserUpdate) AddGroups(g ...*Group) *UserUpdate { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return uu.AddGroupIDs(ids...) +} + +// AddFriendIDs adds the friends edge to User by ids. +func (uu *UserUpdate) AddFriendIDs(ids ...string) *UserUpdate { + if uu.friends == nil { + uu.friends = make(map[string]struct{}) + } + for i := range ids { + uu.friends[ids[i]] = struct{}{} + } + return uu +} + +// AddFriends adds the friends edges to User. +func (uu *UserUpdate) AddFriends(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.AddFriendIDs(ids...) +} + +// AddFollowerIDs adds the followers edge to User by ids. +func (uu *UserUpdate) AddFollowerIDs(ids ...string) *UserUpdate { + if uu.followers == nil { + uu.followers = make(map[string]struct{}) + } + for i := range ids { + uu.followers[ids[i]] = struct{}{} + } + return uu +} + +// AddFollowers adds the followers edges to User. +func (uu *UserUpdate) AddFollowers(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.AddFollowerIDs(ids...) +} + +// AddFollowingIDs adds the following edge to User by ids. 
+func (uu *UserUpdate) AddFollowingIDs(ids ...string) *UserUpdate { + if uu.following == nil { + uu.following = make(map[string]struct{}) + } + for i := range ids { + uu.following[ids[i]] = struct{}{} + } + return uu +} + +// AddFollowing adds the following edges to User. +func (uu *UserUpdate) AddFollowing(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.AddFollowingIDs(ids...) +} + +// SetTeamID sets the team edge to Pet by id. +func (uu *UserUpdate) SetTeamID(id string) *UserUpdate { + if uu.team == nil { + uu.team = make(map[string]struct{}) + } + uu.team[id] = struct{}{} + return uu +} + +// SetNillableTeamID sets the team edge to Pet by id if the given value is not nil. +func (uu *UserUpdate) SetNillableTeamID(id *string) *UserUpdate { + if id != nil { + uu = uu.SetTeamID(*id) + } + return uu +} + +// SetTeam sets the team edge to Pet. +func (uu *UserUpdate) SetTeam(p *Pet) *UserUpdate { + return uu.SetTeamID(p.ID) +} + +// SetSpouseID sets the spouse edge to User by id. +func (uu *UserUpdate) SetSpouseID(id string) *UserUpdate { + if uu.spouse == nil { + uu.spouse = make(map[string]struct{}) + } + uu.spouse[id] = struct{}{} + return uu +} + +// SetNillableSpouseID sets the spouse edge to User by id if the given value is not nil. +func (uu *UserUpdate) SetNillableSpouseID(id *string) *UserUpdate { + if id != nil { + uu = uu.SetSpouseID(*id) + } + return uu +} + +// SetSpouse sets the spouse edge to User. +func (uu *UserUpdate) SetSpouse(u *User) *UserUpdate { + return uu.SetSpouseID(u.ID) +} + +// AddChildIDs adds the children edge to User by ids. +func (uu *UserUpdate) AddChildIDs(ids ...string) *UserUpdate { + if uu.children == nil { + uu.children = make(map[string]struct{}) + } + for i := range ids { + uu.children[ids[i]] = struct{}{} + } + return uu +} + +// AddChildren adds the children edges to User. 
+func (uu *UserUpdate) AddChildren(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.AddChildIDs(ids...) +} + +// SetParentID sets the parent edge to User by id. +func (uu *UserUpdate) SetParentID(id string) *UserUpdate { + if uu.parent == nil { + uu.parent = make(map[string]struct{}) + } + uu.parent[id] = struct{}{} + return uu +} + +// SetNillableParentID sets the parent edge to User by id if the given value is not nil. +func (uu *UserUpdate) SetNillableParentID(id *string) *UserUpdate { + if id != nil { + uu = uu.SetParentID(*id) + } + return uu +} + +// SetParent sets the parent edge to User. +func (uu *UserUpdate) SetParent(u *User) *UserUpdate { + return uu.SetParentID(u.ID) +} + +// ClearCard clears the card edge to Card. +func (uu *UserUpdate) ClearCard() *UserUpdate { + uu.clearedCard = true + return uu +} + +// RemovePetIDs removes the pets edge to Pet by ids. +func (uu *UserUpdate) RemovePetIDs(ids ...string) *UserUpdate { + if uu.removedPets == nil { + uu.removedPets = make(map[string]struct{}) + } + for i := range ids { + uu.removedPets[ids[i]] = struct{}{} + } + return uu +} + +// RemovePets removes pets edges to Pet. +func (uu *UserUpdate) RemovePets(p ...*Pet) *UserUpdate { + ids := make([]string, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uu.RemovePetIDs(ids...) +} + +// RemoveFileIDs removes the files edge to File by ids. +func (uu *UserUpdate) RemoveFileIDs(ids ...string) *UserUpdate { + if uu.removedFiles == nil { + uu.removedFiles = make(map[string]struct{}) + } + for i := range ids { + uu.removedFiles[ids[i]] = struct{}{} + } + return uu +} + +// RemoveFiles removes files edges to File. +func (uu *UserUpdate) RemoveFiles(f ...*File) *UserUpdate { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return uu.RemoveFileIDs(ids...) +} + +// RemoveGroupIDs removes the groups edge to Group by ids. 
+func (uu *UserUpdate) RemoveGroupIDs(ids ...string) *UserUpdate { + if uu.removedGroups == nil { + uu.removedGroups = make(map[string]struct{}) + } + for i := range ids { + uu.removedGroups[ids[i]] = struct{}{} + } + return uu +} + +// RemoveGroups removes groups edges to Group. +func (uu *UserUpdate) RemoveGroups(g ...*Group) *UserUpdate { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return uu.RemoveGroupIDs(ids...) +} + +// RemoveFriendIDs removes the friends edge to User by ids. +func (uu *UserUpdate) RemoveFriendIDs(ids ...string) *UserUpdate { + if uu.removedFriends == nil { + uu.removedFriends = make(map[string]struct{}) + } + for i := range ids { + uu.removedFriends[ids[i]] = struct{}{} + } + return uu +} + +// RemoveFriends removes friends edges to User. +func (uu *UserUpdate) RemoveFriends(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.RemoveFriendIDs(ids...) +} + +// RemoveFollowerIDs removes the followers edge to User by ids. +func (uu *UserUpdate) RemoveFollowerIDs(ids ...string) *UserUpdate { + if uu.removedFollowers == nil { + uu.removedFollowers = make(map[string]struct{}) + } + for i := range ids { + uu.removedFollowers[ids[i]] = struct{}{} + } + return uu +} + +// RemoveFollowers removes followers edges to User. +func (uu *UserUpdate) RemoveFollowers(u ...*User) *UserUpdate { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uu.RemoveFollowerIDs(ids...) +} + +// RemoveFollowingIDs removes the following edge to User by ids. +func (uu *UserUpdate) RemoveFollowingIDs(ids ...string) *UserUpdate { + if uu.removedFollowing == nil { + uu.removedFollowing = make(map[string]struct{}) + } + for i := range ids { + uu.removedFollowing[ids[i]] = struct{}{} + } + return uu +} + +// RemoveFollowing removes following edges to User. 
func (uu *UserUpdate) RemoveFollowing(u ...*User) *UserUpdate {
	ids := make([]string, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return uu.RemoveFollowingIDs(ids...)
}

// ClearTeam clears the team edge to Pet.
// The actual unlink is performed when Save/Exec runs.
func (uu *UserUpdate) ClearTeam() *UserUpdate {
	uu.clearedTeam = true
	return uu
}

// ClearSpouse clears the spouse edge to User.
// The actual unlink is performed when Save/Exec runs.
func (uu *UserUpdate) ClearSpouse() *UserUpdate {
	uu.clearedSpouse = true
	return uu
}

// RemoveChildIDs removes the children edge to User by ids.
// The ids are accumulated in a set, so duplicates are collapsed.
func (uu *UserUpdate) RemoveChildIDs(ids ...string) *UserUpdate {
	if uu.removedChildren == nil {
		uu.removedChildren = make(map[string]struct{})
	}
	for i := range ids {
		uu.removedChildren[ids[i]] = struct{}{}
	}
	return uu
}

// RemoveChildren removes children edges to User.
// Convenience wrapper over RemoveChildIDs for entity values.
func (uu *UserUpdate) RemoveChildren(u ...*User) *UserUpdate {
	ids := make([]string, len(u))
	for i := range u {
		ids[i] = u[i].ID
	}
	return uu.RemoveChildIDs(ids...)
}

// ClearParent clears the parent edge to User.
// The actual unlink is performed when Save/Exec runs.
func (uu *UserUpdate) ClearParent() *UserUpdate {
	uu.clearedParent = true
	return uu
}

// Save executes the query and returns the number of rows/vertices matched by this operation.
func (uu *UserUpdate) Save(ctx context.Context) (int, error) {
	// Unique (O2O/M2O) edges accept at most one assignment; reject early,
	// before any storage work is done.
	if len(uu.card) > 1 {
		return 0, errors.New("ent: multiple assignments on a unique edge \"card\"")
	}
	if len(uu.team) > 1 {
		return 0, errors.New("ent: multiple assignments on a unique edge \"team\"")
	}
	if len(uu.spouse) > 1 {
		return 0, errors.New("ent: multiple assignments on a unique edge \"spouse\"")
	}
	if len(uu.parent) > 1 {
		return 0, errors.New("ent: multiple assignments on a unique edge \"parent\"")
	}
	// Dispatch on the configured driver dialect: the SQL flavors share
	// sqlSave; Neptune (Gremlin) returns the updated vertices, whose
	// count is reported as the number affected.
	switch uu.driver.Dialect() {
	case dialect.MySQL, dialect.SQLite:
		return uu.sqlSave(ctx)
	case dialect.Neptune:
		vertices, err := uu.gremlinSave(ctx)
		return len(vertices), err
	default:
		return 0, errors.New("ent: unsupported dialect")
	}
}

// SaveX is like Save, but panics if an error occurs.
func (uu *UserUpdate) SaveX(ctx context.Context) int {
	affected, err := uu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (uu *UserUpdate) Exec(ctx context.Context) error {
	_, err := uu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
+func (uu *UserUpdate) ExecX(ctx context.Context) { + if err := uu.Exec(ctx); err != nil { + panic(err) + } +} + +func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(user.FieldID).From(sql.Table(user.Table)) + for _, p := range uu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = uu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := uu.driver.Tx(ctx) + if err != nil { + return 0, err + } + var ( + update bool + res sql.Result + builder = sql.Update(user.Table).Where(sql.InInts(user.FieldID, ids...)) + ) + if uu.age != nil { + update = true + builder.Set(user.FieldAge, *uu.age) + } + if uu.name != nil { + update = true + builder.Set(user.FieldName, *uu.name) + } + if uu.last != nil { + update = true + builder.Set(user.FieldLast, *uu.last) + } + if uu.nickname != nil { + update = true + builder.Set(user.FieldNickname, *uu.nickname) + } + if uu.phone != nil { + update = true + builder.Set(user.FieldPhone, *uu.phone) + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if uu.clearedCard { + query, args := sql.Update(user.CardTable). + SetNull(user.CardColumn). + Where(sql.InInts(card.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.card) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(uu.card)[0]) + if serr != nil { + return 0, err + } + query, args := sql.Update(user.CardTable). + Set(user.CardColumn, id). + Where(sql.EQ(card.FieldID, eid).And().IsNull(user.CardColumn)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.card) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"card\" %v already connected to a different \"User\"", keys(uu.card))}) + } + } + } + if len(uu.removedPets) > 0 { + eids := make([]int, len(uu.removedPets)) + for eid := range uu.removedPets { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.PetsTable). + SetNull(user.PetsColumn). + Where(sql.InInts(user.PetsColumn, ids...)). + Where(sql.InInts(pet.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.pets) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uu.pets { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(pet.FieldID, eid) + } + query, args := sql.Update(user.PetsTable). + Set(user.PetsColumn, id). + Where(sql.And(p, sql.IsNull(user.PetsColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.pets) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"pets\" %v already connected to a different \"User\"", keys(uu.pets))}) + } + } + } + if len(uu.removedFiles) > 0 { + eids := make([]int, len(uu.removedFiles)) + for eid := range uu.removedFiles { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.FilesTable). + SetNull(user.FilesColumn). + Where(sql.InInts(user.FilesColumn, ids...)). + Where(sql.InInts(file.FieldID, eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.files) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uu.files { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(file.FieldID, eid) + } + query, args := sql.Update(user.FilesTable). + Set(user.FilesColumn, id). + Where(sql.And(p, sql.IsNull(user.FilesColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.files) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"User\"", keys(uu.files))}) + } + } + } + if len(uu.removedGroups) > 0 { + eids := make([]int, len(uu.removedGroups)) + for eid := range uu.removedGroups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.GroupsTable). + Where(sql.InInts(user.GroupsPrimaryKey[0], ids...)). + Where(sql.InInts(user.GroupsPrimaryKey[1], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.groups) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uu.groups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.GroupsTable). 
+ Columns(user.GroupsPrimaryKey[0], user.GroupsPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.removedFriends) > 0 { + eids := make([]int, len(uu.removedFriends)) + for eid := range uu.removedFriends { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FriendsTable). + Where(sql.InInts(user.FriendsPrimaryKey[0], ids...)). + Where(sql.InInts(user.FriendsPrimaryKey[1], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + query, args = sql.Delete(user.FriendsTable). + Where(sql.InInts(user.FriendsPrimaryKey[1], ids...)). + Where(sql.InInts(user.FriendsPrimaryKey[0], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.friends) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uu.friends { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}, []int{eid, id}) + } + } + builder := sql.Insert(user.FriendsTable). + Columns(user.FriendsPrimaryKey[0], user.FriendsPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.removedFollowers) > 0 { + eids := make([]int, len(uu.removedFollowers)) + for eid := range uu.removedFollowers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FollowersTable). + Where(sql.InInts(user.FollowersPrimaryKey[1], ids...)). + Where(sql.InInts(user.FollowersPrimaryKey[0], eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.followers) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uu.followers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.FollowersTable). + Columns(user.FollowersPrimaryKey[1], user.FollowersPrimaryKey[0]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.removedFollowing) > 0 { + eids := make([]int, len(uu.removedFollowing)) + for eid := range uu.removedFollowing { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FollowingTable). + Where(sql.InInts(user.FollowingPrimaryKey[0], ids...)). + Where(sql.InInts(user.FollowingPrimaryKey[1], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.following) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uu.following { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.FollowingTable). + Columns(user.FollowingPrimaryKey[0], user.FollowingPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if uu.clearedTeam { + query, args := sql.Update(user.TeamTable). + SetNull(user.TeamColumn). + Where(sql.InInts(pet.FieldID, ids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.team) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(uu.team)[0]) + if serr != nil { + return 0, err + } + query, args := sql.Update(user.TeamTable). + Set(user.TeamColumn, id). + Where(sql.EQ(pet.FieldID, eid).And().IsNull(user.TeamColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.team) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"User\"", keys(uu.team))}) + } + } + } + if uu.clearedSpouse { + query, args := sql.Update(user.SpouseTable). + SetNull(user.SpouseColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + query, args = sql.Update(user.SpouseTable). + SetNull(user.SpouseColumn). + Where(sql.InInts(user.SpouseColumn, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.spouse) > 0 { + if n := len(ids); n > 1 { + return 0, rollback(tx, fmt.Errorf("ent: can't link O2O edge \"spouse\" to %d vertices (> 1)", n)) + } + for eid := range uu.spouse { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(user.SpouseTable). + Set(user.SpouseColumn, eid). + Where(sql.EQ(user.FieldID, ids[0])).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + query, args = sql.Update(user.SpouseTable). + Set(user.SpouseColumn, ids[0]). 
+ Where(sql.EQ(user.FieldID, eid).And().IsNull(user.SpouseColumn)).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.spouse) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("\"spouse\" (%v) already connected to a different \"User\"", eid)}) + } + } + } + if len(uu.removedChildren) > 0 { + eids := make([]int, len(uu.removedChildren)) + for eid := range uu.removedChildren { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.ChildrenTable). + SetNull(user.ChildrenColumn). + Where(sql.InInts(user.ChildrenColumn, ids...)). + Where(sql.InInts(user.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.children) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uu.children { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(user.FieldID, eid) + } + query, args := sql.Update(user.ChildrenTable). + Set(user.ChildrenColumn, id). + Where(sql.And(p, sql.IsNull(user.ChildrenColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return 0, rollback(tx, err) + } + if int(affected) < len(uu.children) { + return 0, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"children\" %v already connected to a different \"User\"", keys(uu.children))}) + } + } + } + if uu.clearedParent { + query, args := sql.Update(user.ParentTable). + SetNull(user.ParentColumn). + Where(sql.InInts(user.FieldID, ids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + if len(uu.parent) > 0 { + for eid := range uu.parent { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(user.ParentTable). + Set(user.ParentColumn, eid). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return 0, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (uu *UserUpdate) gremlinSave(ctx context.Context) ([]*User, error) { + res := &gremlin.Response{} + query, bindings := uu.gremlin().Query() + if err := uu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var us Users + us.config(uu.config) + if err := us.FromResponse(res); err != nil { + return nil, err + } + return us, nil +} + +func (uu *UserUpdate) gremlin() *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 8) + v := g.V().HasLabel(user.Label) + for _, p := range uu.predicates { + p.Gremlin(v) + } + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if uu.age != nil { + v.Property(dsl.Single, user.FieldAge, *uu.age) + } + if uu.name != nil { + v.Property(dsl.Single, user.FieldName, *uu.name) + } + if uu.last != nil { + v.Property(dsl.Single, user.FieldLast, *uu.last) + } + if uu.nickname != nil { + constraints = append(constraints, &constraint{ + pred: g.V().Has(user.Label, user.FieldNickname, *uu.nickname).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldNickname, *uu.nickname)), + }) + v.Property(dsl.Single, user.FieldNickname, *uu.nickname) + } + if uu.phone != nil { + constraints = append(constraints, &constraint{ + pred: g.V().Has(user.Label, user.FieldPhone, *uu.phone).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldPhone, *uu.phone)), + }) + v.Property(dsl.Single, user.FieldPhone, *uu.phone) + } + if uu.clearedCard { + tr := rv.Clone().OutE(user.CardLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.card { + v.AddE(user.CardLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.CardLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.CardLabel, id)), + }) + } + for id := range uu.removedPets { + tr := rv.Clone().OutE(user.PetsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.pets { + v.AddE(user.PetsLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.PetsLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.PetsLabel, id)), + }) + } + for id := range uu.removedFiles { + tr := rv.Clone().OutE(user.FilesLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := 
range uu.files { + v.AddE(user.FilesLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.FilesLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.FilesLabel, id)), + }) + } + for id := range uu.removedGroups { + tr := rv.Clone().OutE(user.GroupsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.groups { + v.AddE(user.GroupsLabel).To(g.V(id)).OutV() + } + for id := range uu.removedFriends { + tr := rv.Clone().BothE(user.FriendsLabel).Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.friends { + v.AddE(user.FriendsLabel).To(g.V(id)).OutV() + } + for id := range uu.removedFollowers { + tr := rv.Clone().InE(user.FollowingLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.followers { + v.AddE(user.FollowingLabel).From(g.V(id)).InV() + } + for id := range uu.removedFollowing { + tr := rv.Clone().OutE(user.FollowingLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.following { + v.AddE(user.FollowingLabel).To(g.V(id)).OutV() + } + if uu.clearedTeam { + tr := rv.Clone().OutE(user.TeamLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.team { + v.AddE(user.TeamLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.TeamLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.TeamLabel, id)), + }) + } + if uu.clearedSpouse { + tr := rv.Clone().BothE(user.SpouseLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.spouse { + v.AddE(user.SpouseLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: rv.Clone().Both(user.SpouseLabel).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, 
user.SpouseLabel, id)), + }) + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.SpouseLabel).Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.SpouseLabel, id)), + }) + } + for id := range uu.removedChildren { + tr := rv.Clone().InE(user.ParentLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.children { + v.AddE(user.ParentLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.ParentLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.ParentLabel, id)), + }) + } + if uu.clearedParent { + tr := rv.Clone().OutE(user.ParentLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uu.parent { + v.AddE(user.ParentLabel).To(g.V(id)).OutV() + } + v.ValueMap(true) + if len(constraints) > 0 { + constraints = append(constraints, &constraint{ + pred: rv.Count(), + test: __.Is(p.GT(1)).Constant(&ErrConstraintFailed{msg: "update traversal contains more than one vertex"}), + }) + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) +} + +// UserUpdateOne is the builder for updating a single User entity. 
+type UserUpdateOne struct { + config + id string + age *int + name *string + last *string + nickname *string + phone *string + card map[string]struct{} + pets map[string]struct{} + files map[string]struct{} + groups map[string]struct{} + friends map[string]struct{} + followers map[string]struct{} + following map[string]struct{} + team map[string]struct{} + spouse map[string]struct{} + children map[string]struct{} + parent map[string]struct{} + clearedCard bool + removedPets map[string]struct{} + removedFiles map[string]struct{} + removedGroups map[string]struct{} + removedFriends map[string]struct{} + removedFollowers map[string]struct{} + removedFollowing map[string]struct{} + clearedTeam bool + clearedSpouse bool + removedChildren map[string]struct{} + clearedParent bool +} + +// SetAge sets the age field. +func (uuo *UserUpdateOne) SetAge(i int) *UserUpdateOne { + uuo.age = &i + return uuo +} + +// SetName sets the name field. +func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne { + uuo.name = &s + return uuo +} + +// SetLast sets the last field. +func (uuo *UserUpdateOne) SetLast(s string) *UserUpdateOne { + uuo.last = &s + return uuo +} + +// SetNillableLast sets the last field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableLast(s *string) *UserUpdateOne { + if s != nil { + uuo.SetLast(*s) + } + return uuo +} + +// SetNickname sets the nickname field. +func (uuo *UserUpdateOne) SetNickname(s string) *UserUpdateOne { + uuo.nickname = &s + return uuo +} + +// SetNillableNickname sets the nickname field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableNickname(s *string) *UserUpdateOne { + if s != nil { + uuo.SetNickname(*s) + } + return uuo +} + +// SetPhone sets the phone field. +func (uuo *UserUpdateOne) SetPhone(s string) *UserUpdateOne { + uuo.phone = &s + return uuo +} + +// SetNillablePhone sets the phone field if the given value is not nil. 
+func (uuo *UserUpdateOne) SetNillablePhone(s *string) *UserUpdateOne { + if s != nil { + uuo.SetPhone(*s) + } + return uuo +} + +// SetCardID sets the card edge to Card by id. +func (uuo *UserUpdateOne) SetCardID(id string) *UserUpdateOne { + if uuo.card == nil { + uuo.card = make(map[string]struct{}) + } + uuo.card[id] = struct{}{} + return uuo +} + +// SetNillableCardID sets the card edge to Card by id if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableCardID(id *string) *UserUpdateOne { + if id != nil { + uuo = uuo.SetCardID(*id) + } + return uuo +} + +// SetCard sets the card edge to Card. +func (uuo *UserUpdateOne) SetCard(c *Card) *UserUpdateOne { + return uuo.SetCardID(c.ID) +} + +// AddPetIDs adds the pets edge to Pet by ids. +func (uuo *UserUpdateOne) AddPetIDs(ids ...string) *UserUpdateOne { + if uuo.pets == nil { + uuo.pets = make(map[string]struct{}) + } + for i := range ids { + uuo.pets[ids[i]] = struct{}{} + } + return uuo +} + +// AddPets adds the pets edges to Pet. +func (uuo *UserUpdateOne) AddPets(p ...*Pet) *UserUpdateOne { + ids := make([]string, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uuo.AddPetIDs(ids...) +} + +// AddFileIDs adds the files edge to File by ids. +func (uuo *UserUpdateOne) AddFileIDs(ids ...string) *UserUpdateOne { + if uuo.files == nil { + uuo.files = make(map[string]struct{}) + } + for i := range ids { + uuo.files[ids[i]] = struct{}{} + } + return uuo +} + +// AddFiles adds the files edges to File. +func (uuo *UserUpdateOne) AddFiles(f ...*File) *UserUpdateOne { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return uuo.AddFileIDs(ids...) +} + +// AddGroupIDs adds the groups edge to Group by ids. +func (uuo *UserUpdateOne) AddGroupIDs(ids ...string) *UserUpdateOne { + if uuo.groups == nil { + uuo.groups = make(map[string]struct{}) + } + for i := range ids { + uuo.groups[ids[i]] = struct{}{} + } + return uuo +} + +// AddGroups adds the groups edges to Group. 
+func (uuo *UserUpdateOne) AddGroups(g ...*Group) *UserUpdateOne { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return uuo.AddGroupIDs(ids...) +} + +// AddFriendIDs adds the friends edge to User by ids. +func (uuo *UserUpdateOne) AddFriendIDs(ids ...string) *UserUpdateOne { + if uuo.friends == nil { + uuo.friends = make(map[string]struct{}) + } + for i := range ids { + uuo.friends[ids[i]] = struct{}{} + } + return uuo +} + +// AddFriends adds the friends edges to User. +func (uuo *UserUpdateOne) AddFriends(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.AddFriendIDs(ids...) +} + +// AddFollowerIDs adds the followers edge to User by ids. +func (uuo *UserUpdateOne) AddFollowerIDs(ids ...string) *UserUpdateOne { + if uuo.followers == nil { + uuo.followers = make(map[string]struct{}) + } + for i := range ids { + uuo.followers[ids[i]] = struct{}{} + } + return uuo +} + +// AddFollowers adds the followers edges to User. +func (uuo *UserUpdateOne) AddFollowers(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.AddFollowerIDs(ids...) +} + +// AddFollowingIDs adds the following edge to User by ids. +func (uuo *UserUpdateOne) AddFollowingIDs(ids ...string) *UserUpdateOne { + if uuo.following == nil { + uuo.following = make(map[string]struct{}) + } + for i := range ids { + uuo.following[ids[i]] = struct{}{} + } + return uuo +} + +// AddFollowing adds the following edges to User. +func (uuo *UserUpdateOne) AddFollowing(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.AddFollowingIDs(ids...) +} + +// SetTeamID sets the team edge to Pet by id. 
+func (uuo *UserUpdateOne) SetTeamID(id string) *UserUpdateOne { + if uuo.team == nil { + uuo.team = make(map[string]struct{}) + } + uuo.team[id] = struct{}{} + return uuo +} + +// SetNillableTeamID sets the team edge to Pet by id if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableTeamID(id *string) *UserUpdateOne { + if id != nil { + uuo = uuo.SetTeamID(*id) + } + return uuo +} + +// SetTeam sets the team edge to Pet. +func (uuo *UserUpdateOne) SetTeam(p *Pet) *UserUpdateOne { + return uuo.SetTeamID(p.ID) +} + +// SetSpouseID sets the spouse edge to User by id. +func (uuo *UserUpdateOne) SetSpouseID(id string) *UserUpdateOne { + if uuo.spouse == nil { + uuo.spouse = make(map[string]struct{}) + } + uuo.spouse[id] = struct{}{} + return uuo +} + +// SetNillableSpouseID sets the spouse edge to User by id if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableSpouseID(id *string) *UserUpdateOne { + if id != nil { + uuo = uuo.SetSpouseID(*id) + } + return uuo +} + +// SetSpouse sets the spouse edge to User. +func (uuo *UserUpdateOne) SetSpouse(u *User) *UserUpdateOne { + return uuo.SetSpouseID(u.ID) +} + +// AddChildIDs adds the children edge to User by ids. +func (uuo *UserUpdateOne) AddChildIDs(ids ...string) *UserUpdateOne { + if uuo.children == nil { + uuo.children = make(map[string]struct{}) + } + for i := range ids { + uuo.children[ids[i]] = struct{}{} + } + return uuo +} + +// AddChildren adds the children edges to User. +func (uuo *UserUpdateOne) AddChildren(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.AddChildIDs(ids...) +} + +// SetParentID sets the parent edge to User by id. +func (uuo *UserUpdateOne) SetParentID(id string) *UserUpdateOne { + if uuo.parent == nil { + uuo.parent = make(map[string]struct{}) + } + uuo.parent[id] = struct{}{} + return uuo +} + +// SetNillableParentID sets the parent edge to User by id if the given value is not nil. 
+func (uuo *UserUpdateOne) SetNillableParentID(id *string) *UserUpdateOne { + if id != nil { + uuo = uuo.SetParentID(*id) + } + return uuo +} + +// SetParent sets the parent edge to User. +func (uuo *UserUpdateOne) SetParent(u *User) *UserUpdateOne { + return uuo.SetParentID(u.ID) +} + +// ClearCard clears the card edge to Card. +func (uuo *UserUpdateOne) ClearCard() *UserUpdateOne { + uuo.clearedCard = true + return uuo +} + +// RemovePetIDs removes the pets edge to Pet by ids. +func (uuo *UserUpdateOne) RemovePetIDs(ids ...string) *UserUpdateOne { + if uuo.removedPets == nil { + uuo.removedPets = make(map[string]struct{}) + } + for i := range ids { + uuo.removedPets[ids[i]] = struct{}{} + } + return uuo +} + +// RemovePets removes pets edges to Pet. +func (uuo *UserUpdateOne) RemovePets(p ...*Pet) *UserUpdateOne { + ids := make([]string, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return uuo.RemovePetIDs(ids...) +} + +// RemoveFileIDs removes the files edge to File by ids. +func (uuo *UserUpdateOne) RemoveFileIDs(ids ...string) *UserUpdateOne { + if uuo.removedFiles == nil { + uuo.removedFiles = make(map[string]struct{}) + } + for i := range ids { + uuo.removedFiles[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveFiles removes files edges to File. +func (uuo *UserUpdateOne) RemoveFiles(f ...*File) *UserUpdateOne { + ids := make([]string, len(f)) + for i := range f { + ids[i] = f[i].ID + } + return uuo.RemoveFileIDs(ids...) +} + +// RemoveGroupIDs removes the groups edge to Group by ids. +func (uuo *UserUpdateOne) RemoveGroupIDs(ids ...string) *UserUpdateOne { + if uuo.removedGroups == nil { + uuo.removedGroups = make(map[string]struct{}) + } + for i := range ids { + uuo.removedGroups[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveGroups removes groups edges to Group. 
+func (uuo *UserUpdateOne) RemoveGroups(g ...*Group) *UserUpdateOne { + ids := make([]string, len(g)) + for i := range g { + ids[i] = g[i].ID + } + return uuo.RemoveGroupIDs(ids...) +} + +// RemoveFriendIDs removes the friends edge to User by ids. +func (uuo *UserUpdateOne) RemoveFriendIDs(ids ...string) *UserUpdateOne { + if uuo.removedFriends == nil { + uuo.removedFriends = make(map[string]struct{}) + } + for i := range ids { + uuo.removedFriends[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveFriends removes friends edges to User. +func (uuo *UserUpdateOne) RemoveFriends(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.RemoveFriendIDs(ids...) +} + +// RemoveFollowerIDs removes the followers edge to User by ids. +func (uuo *UserUpdateOne) RemoveFollowerIDs(ids ...string) *UserUpdateOne { + if uuo.removedFollowers == nil { + uuo.removedFollowers = make(map[string]struct{}) + } + for i := range ids { + uuo.removedFollowers[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveFollowers removes followers edges to User. +func (uuo *UserUpdateOne) RemoveFollowers(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.RemoveFollowerIDs(ids...) +} + +// RemoveFollowingIDs removes the following edge to User by ids. +func (uuo *UserUpdateOne) RemoveFollowingIDs(ids ...string) *UserUpdateOne { + if uuo.removedFollowing == nil { + uuo.removedFollowing = make(map[string]struct{}) + } + for i := range ids { + uuo.removedFollowing[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveFollowing removes following edges to User. +func (uuo *UserUpdateOne) RemoveFollowing(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.RemoveFollowingIDs(ids...) +} + +// ClearTeam clears the team edge to Pet. 
+func (uuo *UserUpdateOne) ClearTeam() *UserUpdateOne { + uuo.clearedTeam = true + return uuo +} + +// ClearSpouse clears the spouse edge to User. +func (uuo *UserUpdateOne) ClearSpouse() *UserUpdateOne { + uuo.clearedSpouse = true + return uuo +} + +// RemoveChildIDs removes the children edge to User by ids. +func (uuo *UserUpdateOne) RemoveChildIDs(ids ...string) *UserUpdateOne { + if uuo.removedChildren == nil { + uuo.removedChildren = make(map[string]struct{}) + } + for i := range ids { + uuo.removedChildren[ids[i]] = struct{}{} + } + return uuo +} + +// RemoveChildren removes children edges to User. +func (uuo *UserUpdateOne) RemoveChildren(u ...*User) *UserUpdateOne { + ids := make([]string, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return uuo.RemoveChildIDs(ids...) +} + +// ClearParent clears the parent edge to User. +func (uuo *UserUpdateOne) ClearParent() *UserUpdateOne { + uuo.clearedParent = true + return uuo +} + +// Save executes the query and returns the updated entity. +func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { + if len(uuo.card) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"card\"") + } + if len(uuo.team) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"team\"") + } + if len(uuo.spouse) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"spouse\"") + } + if len(uuo.parent) > 1 { + return nil, errors.New("ent: multiple assignments on a unique edge \"parent\"") + } + switch uuo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return uuo.sqlSave(ctx) + case dialect.Neptune: + return uuo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { + u, err := uuo.Save(ctx) + if err != nil { + panic(err) + } + return u +} + +// Exec executes the query on the entity. 
+func (uuo *UserUpdateOne) Exec(ctx context.Context) error { + _, err := uuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uuo *UserUpdateOne) ExecX(ctx context.Context) { + if err := uuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (u *User, err error) { + selector := sql.Select(user.Columns...).From(sql.Table(user.Table)) + user.ID(uuo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = uuo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + u = &User{config: uuo.config} + if err := u.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into User: %v", err) + } + id = u.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: User not found with id: %v", uuo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one User with the same id: %v", uuo.id) + } + + tx, err := uuo.driver.Tx(ctx) + if err != nil { + return nil, err + } + var ( + update bool + res sql.Result + builder = sql.Update(user.Table).Where(sql.InInts(user.FieldID, ids...)) + ) + if uuo.age != nil { + update = true + builder.Set(user.FieldAge, *uuo.age) + u.Age = *uuo.age + } + if uuo.name != nil { + update = true + builder.Set(user.FieldName, *uuo.name) + u.Name = *uuo.name + } + if uuo.last != nil { + update = true + builder.Set(user.FieldLast, *uuo.last) + u.Last = *uuo.last + } + if uuo.nickname != nil { + update = true + builder.Set(user.FieldNickname, *uuo.nickname) + u.Nickname = *uuo.nickname + } + if uuo.phone != nil { + update = true + builder.Set(user.FieldPhone, *uuo.phone) + u.Phone = *uuo.phone + } + if update { + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if uuo.clearedCard { + query, args := 
sql.Update(user.CardTable). + SetNull(user.CardColumn). + Where(sql.InInts(card.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.card) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(uuo.card)[0]) + if serr != nil { + return nil, err + } + query, args := sql.Update(user.CardTable). + Set(user.CardColumn, id). + Where(sql.EQ(card.FieldID, eid).And().IsNull(user.CardColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.card) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"card\" %v already connected to a different \"User\"", keys(uuo.card))}) + } + } + } + if len(uuo.removedPets) > 0 { + eids := make([]int, len(uuo.removedPets)) + for eid := range uuo.removedPets { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.PetsTable). + SetNull(user.PetsColumn). + Where(sql.InInts(user.PetsColumn, ids...)). + Where(sql.InInts(pet.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.pets) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uuo.pets { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(pet.FieldID, eid) + } + query, args := sql.Update(user.PetsTable). + Set(user.PetsColumn, id). + Where(sql.And(p, sql.IsNull(user.PetsColumn))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.pets) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"pets\" %v already connected to a different \"User\"", keys(uuo.pets))}) + } + } + } + if len(uuo.removedFiles) > 0 { + eids := make([]int, len(uuo.removedFiles)) + for eid := range uuo.removedFiles { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.FilesTable). + SetNull(user.FilesColumn). + Where(sql.InInts(user.FilesColumn, ids...)). + Where(sql.InInts(file.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.files) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uuo.files { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(file.FieldID, eid) + } + query, args := sql.Update(user.FilesTable). + Set(user.FilesColumn, id). + Where(sql.And(p, sql.IsNull(user.FilesColumn))). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.files) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"files\" %v already connected to a different \"User\"", keys(uuo.files))}) + } + } + } + if len(uuo.removedGroups) > 0 { + eids := make([]int, len(uuo.removedGroups)) + for eid := range uuo.removedGroups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.GroupsTable). + Where(sql.InInts(user.GroupsPrimaryKey[0], ids...)). 
+ Where(sql.InInts(user.GroupsPrimaryKey[1], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.groups) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uuo.groups { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.GroupsTable). + Columns(user.GroupsPrimaryKey[0], user.GroupsPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.removedFriends) > 0 { + eids := make([]int, len(uuo.removedFriends)) + for eid := range uuo.removedFriends { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FriendsTable). + Where(sql.InInts(user.FriendsPrimaryKey[0], ids...)). + Where(sql.InInts(user.FriendsPrimaryKey[1], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + query, args = sql.Delete(user.FriendsTable). + Where(sql.InInts(user.FriendsPrimaryKey[1], ids...)). + Where(sql.InInts(user.FriendsPrimaryKey[0], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.friends) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uuo.friends { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}, []int{eid, id}) + } + } + builder := sql.Insert(user.FriendsTable). 
+ Columns(user.FriendsPrimaryKey[0], user.FriendsPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.removedFollowers) > 0 { + eids := make([]int, len(uuo.removedFollowers)) + for eid := range uuo.removedFollowers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FollowersTable). + Where(sql.InInts(user.FollowersPrimaryKey[1], ids...)). + Where(sql.InInts(user.FollowersPrimaryKey[0], eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.followers) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uuo.followers { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.FollowersTable). + Columns(user.FollowersPrimaryKey[1], user.FollowersPrimaryKey[0]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.removedFollowing) > 0 { + eids := make([]int, len(uuo.removedFollowing)) + for eid := range uuo.removedFollowing { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Delete(user.FollowingTable). + Where(sql.InInts(user.FollowingPrimaryKey[0], ids...)). + Where(sql.InInts(user.FollowingPrimaryKey[1], eids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.following) > 0 { + values := make([][]int, 0, len(ids)) + for _, id := range ids { + for eid := range uuo.following { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + values = append(values, []int{id, eid}) + } + } + builder := sql.Insert(user.FollowingTable). + Columns(user.FollowingPrimaryKey[0], user.FollowingPrimaryKey[1]) + for _, v := range values { + builder.Values(v[0], v[1]) + } + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if uuo.clearedTeam { + query, args := sql.Update(user.TeamTable). + SetNull(user.TeamColumn). + Where(sql.InInts(pet.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.team) > 0 { + for _, id := range ids { + eid, serr := strconv.Atoi(keys(uuo.team)[0]) + if serr != nil { + return nil, err + } + query, args := sql.Update(user.TeamTable). + Set(user.TeamColumn, id). + Where(sql.EQ(pet.FieldID, eid).And().IsNull(user.TeamColumn)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.team) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"team\" %v already connected to a different \"User\"", keys(uuo.team))}) + } + } + } + if uuo.clearedSpouse { + query, args := sql.Update(user.SpouseTable). + SetNull(user.SpouseColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + query, args = sql.Update(user.SpouseTable). + SetNull(user.SpouseColumn). + Where(sql.InInts(user.SpouseColumn, ids...)). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.spouse) > 0 { + if n := len(ids); n > 1 { + return nil, rollback(tx, fmt.Errorf("ent: can't link O2O edge \"spouse\" to %d vertices (> 1)", n)) + } + for eid := range uuo.spouse { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(user.SpouseTable). + Set(user.SpouseColumn, eid). + Where(sql.EQ(user.FieldID, ids[0])).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + query, args = sql.Update(user.SpouseTable). + Set(user.SpouseColumn, ids[0]). + Where(sql.EQ(user.FieldID, eid).And().IsNull(user.SpouseColumn)).Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.spouse) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("\"spouse\" (%v) already connected to a different \"User\"", eid)}) + } + } + } + if len(uuo.removedChildren) > 0 { + eids := make([]int, len(uuo.removedChildren)) + for eid := range uuo.removedChildren { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + eids = append(eids, eid) + } + query, args := sql.Update(user.ChildrenTable). + SetNull(user.ChildrenColumn). + Where(sql.InInts(user.ChildrenColumn, ids...)). + Where(sql.InInts(user.FieldID, eids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.children) > 0 { + for _, id := range ids { + p := sql.P() + for eid := range uuo.children { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + p.Or().EQ(user.FieldID, eid) + } + query, args := sql.Update(user.ChildrenTable). + Set(user.ChildrenColumn, id). + Where(sql.And(p, sql.IsNull(user.ChildrenColumn))). 
+ Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + affected, err := res.RowsAffected() + if err != nil { + return nil, rollback(tx, err) + } + if int(affected) < len(uuo.children) { + return nil, rollback(tx, &ErrConstraintFailed{msg: fmt.Sprintf("one of \"children\" %v already connected to a different \"User\"", keys(uuo.children))}) + } + } + } + if uuo.clearedParent { + query, args := sql.Update(user.ParentTable). + SetNull(user.ParentColumn). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + if len(uuo.parent) > 0 { + for eid := range uuo.parent { + eid, serr := strconv.Atoi(eid) + if serr != nil { + err = serr + return + } + query, args := sql.Update(user.ParentTable). + Set(user.ParentColumn, eid). + Where(sql.InInts(user.FieldID, ids...)). + Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + } + } + if err = tx.Commit(); err != nil { + return nil, err + } + return u, nil +} + +func (uuo *UserUpdateOne) gremlinSave(ctx context.Context) (*User, error) { + res := &gremlin.Response{} + query, bindings := uuo.gremlin(uuo.id).Query() + if err := uuo.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + u := &User{config: uuo.config} + if err := u.FromResponse(res); err != nil { + return nil, err + } + return u, nil +} + +func (uuo *UserUpdateOne) gremlin(id string) *dsl.Traversal { + type constraint struct { + pred *dsl.Traversal // constraint predicate. + test *dsl.Traversal // test matches and its constant. 
+ } + constraints := make([]*constraint, 0, 8) + v := g.V(id) + var ( + rv = v.Clone() + trs []*dsl.Traversal + ) + if uuo.age != nil { + v.Property(dsl.Single, user.FieldAge, *uuo.age) + } + if uuo.name != nil { + v.Property(dsl.Single, user.FieldName, *uuo.name) + } + if uuo.last != nil { + v.Property(dsl.Single, user.FieldLast, *uuo.last) + } + if uuo.nickname != nil { + constraints = append(constraints, &constraint{ + pred: g.V().Has(user.Label, user.FieldNickname, *uuo.nickname).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldNickname, *uuo.nickname)), + }) + v.Property(dsl.Single, user.FieldNickname, *uuo.nickname) + } + if uuo.phone != nil { + constraints = append(constraints, &constraint{ + pred: g.V().Has(user.Label, user.FieldPhone, *uuo.phone).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueField(user.Label, user.FieldPhone, *uuo.phone)), + }) + v.Property(dsl.Single, user.FieldPhone, *uuo.phone) + } + if uuo.clearedCard { + tr := rv.Clone().OutE(user.CardLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.card { + v.AddE(user.CardLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.CardLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.CardLabel, id)), + }) + } + for id := range uuo.removedPets { + tr := rv.Clone().OutE(user.PetsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.pets { + v.AddE(user.PetsLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.PetsLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.PetsLabel, id)), + }) + } + for id := range uuo.removedFiles { + tr := rv.Clone().OutE(user.FilesLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.files { + 
v.AddE(user.FilesLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.FilesLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.FilesLabel, id)), + }) + } + for id := range uuo.removedGroups { + tr := rv.Clone().OutE(user.GroupsLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.groups { + v.AddE(user.GroupsLabel).To(g.V(id)).OutV() + } + for id := range uuo.removedFriends { + tr := rv.Clone().BothE(user.FriendsLabel).Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.friends { + v.AddE(user.FriendsLabel).To(g.V(id)).OutV() + } + for id := range uuo.removedFollowers { + tr := rv.Clone().InE(user.FollowingLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.followers { + v.AddE(user.FollowingLabel).From(g.V(id)).InV() + } + for id := range uuo.removedFollowing { + tr := rv.Clone().OutE(user.FollowingLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.following { + v.AddE(user.FollowingLabel).To(g.V(id)).OutV() + } + if uuo.clearedTeam { + tr := rv.Clone().OutE(user.TeamLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.team { + v.AddE(user.TeamLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.TeamLabel).InV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.TeamLabel, id)), + }) + } + if uuo.clearedSpouse { + tr := rv.Clone().BothE(user.SpouseLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.spouse { + v.AddE(user.SpouseLabel).To(g.V(id)).OutV() + constraints = append(constraints, &constraint{ + pred: rv.Clone().Both(user.SpouseLabel).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, 
user.SpouseLabel, id)), + }) + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.SpouseLabel).Where(__.Or(__.InV().HasID(id), __.OutV().HasID(id))).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.SpouseLabel, id)), + }) + } + for id := range uuo.removedChildren { + tr := rv.Clone().InE(user.ParentLabel).Where(__.OtherV().HasID(id)).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.children { + v.AddE(user.ParentLabel).From(g.V(id)).InV() + constraints = append(constraints, &constraint{ + pred: g.E().HasLabel(user.ParentLabel).OutV().HasID(id).Count(), + test: __.Is(p.NEQ(0)).Constant(NewErrUniqueEdge(user.Label, user.ParentLabel, id)), + }) + } + if uuo.clearedParent { + tr := rv.Clone().OutE(user.ParentLabel).Drop().Iterate() + trs = append(trs, tr) + } + for id := range uuo.parent { + v.AddE(user.ParentLabel).To(g.V(id)).OutV() + } + v.ValueMap(true) + if len(constraints) > 0 { + v = constraints[0].pred.Coalesce(constraints[0].test, v) + for _, cr := range constraints[1:] { + v = cr.pred.Coalesce(cr.test, v) + } + } + trs = append(trs, v) + return dsl.Join(trs...) 
+} diff --git a/entc/integration/integration_test.go b/entc/integration/integration_test.go new file mode 100644 index 000000000..c8bd70a23 --- /dev/null +++ b/entc/integration/integration_test.go @@ -0,0 +1,1718 @@ +package integration + +import ( + "context" + "fmt" + "net/url" + "reflect" + "runtime" + "sort" + "strings" + "testing" + "time" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + "fbc/ent/entc/integration/ent" + "fbc/ent/entc/integration/ent/card" + "fbc/ent/entc/integration/ent/group" + "fbc/ent/entc/integration/ent/groupinfo" + "fbc/ent/entc/integration/ent/node" + "fbc/ent/entc/integration/ent/pet" + "fbc/ent/entc/integration/ent/user" + "fbc/lib/go/gremlin" + + _ "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/require" +) + +func TestSQLite(t *testing.T) { + db, err := sql.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + if err != nil { + t.Skipf("sqlite is not available: %v", err) + } + defer db.Close() + drv := dialect.Driver(db) + if testing.Verbose() { + drv = dialect.Debug(drv) + } + client := ent.NewClient(ent.Driver(drv)) + require.NoError(t, client.Schema.Create(context.Background())) + for _, tt := range tests { + name := runtime.FuncForPC(reflect.ValueOf(tt).Pointer()).Name() + t.Run(strings.Split(name, ".")[1], func(t *testing.T) { + drop(t, client) + tt(t, client) + }) + } +} + +func TestMySQL(t *testing.T) { + var drv dialect.Driver + drv, err := sql.Open("mysql", "root:pass@tcp(localhost:3306)/test?charset=utf8&parseTime=True") + require.NoError(t, err) + defer drv.Close() + if testing.Verbose() { + drv = dialect.Debug(drv) + } + client := ent.NewClient(ent.Driver(drv)) + require.NoError(t, client.Schema.Create(context.Background())) + for _, tt := range tests { + name := runtime.FuncForPC(reflect.ValueOf(tt).Pointer()).Name() + t.Run(strings.Split(name, ".")[1], func(t *testing.T) { + drop(t, client) + tt(t, client) + }) + } +} + +func TestGremlin(t *testing.T) { + c, err 
:= gremlin.NewClient(gremlin.Config{ + Endpoint: gremlin.Endpoint{ + URL: &url.URL{ + Scheme: "http", + Host: "localhost:8182", + }, + }, + }) + require.NoError(t, err) + var drv dialect.Driver = dialect.NewGremlin(c) + if testing.Verbose() { + drv = dialect.Debug(drv, t.Log) + } + client := ent.NewClient(ent.Driver(drv)) + // run all tests except transaction tests. + for _, tt := range tests[1:] { + name := runtime.FuncForPC(reflect.ValueOf(tt).Pointer()).Name() + t.Run(strings.Split(name, ".")[1], func(t *testing.T) { + drop(t, client) + tt(t, client) + }) + } +} + +// tests for all drivers to run. +var tests = []func(*testing.T, *ent.Client){ + Tx, + Sanity, + Relation, + UniqueConstraint, + O2OTwoTypes, + O2OSameType, + O2OSelfRef, + O2MTwoTypes, + O2MSameType, + M2MSelfRef, + M2MSameType, + M2MTwoTypes, +} + +func Sanity(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + usr := client.User.Create().SetName("foo").SetAge(20).SaveX(ctx) + require.Equal("foo", usr.Name) + require.Equal(20, usr.Age) + require.NotEmpty(usr.ID) + client.User.Query().OnlyX(ctx) + client.User.Delete().ExecX(ctx) + require.Empty(client.User.Query().AllX(ctx)) + pt := client.Pet.Create().SetName("pedro").SaveX(ctx) + usr = client.User.Create().SetName("foo").SetAge(20).AddPets(pt).SaveX(ctx) + child := client.User.Create().SetName("bar").SetAge(20).AddChildren(usr).SaveX(ctx) + inf := client.GroupInfo.Create().SetDesc("desc").SaveX(ctx) + grp := client.Group.Create().SetName("Github").SetExpire(time.Now()).AddUsers(usr, child).SetInfo(inf).SaveX(ctx) + require.Len(grp.QueryUsers().AllX(ctx), 2) + usr.QueryGroups().OnlyX(ctx) + child.QueryGroups().OnlyX(ctx) + usr2 := client.User.Create().SetName("qux").SetAge(20).SetSpouse(usr).SaveX(ctx) + usr2.QuerySpouse().OnlyX(ctx) + usr.QuerySpouse().OnlyX(ctx) + require.Equal(usr.Name, usr.QueryPets().QueryOwner().OnlyX(ctx).Name) + require.Equal(pt.Name, 
usr.QueryPets().QueryOwner().QueryPets().OnlyX(ctx).Name) + require.Empty(usr.QuerySpouse().QueryPets().AllX(ctx)) + require.Equal(pt.Name, usr2.QuerySpouse().QueryPets().OnlyX(ctx).Name) + require.Len(usr.QueryGroups().QueryUsers().AllX(ctx), 2) + require.Len(usr.QueryGroups().QueryUsers().QueryGroups().AllX(ctx), 1, "should be unique by default") + require.Len(usr.QueryGroups().AllX(ctx), 1) + require.Len(client.User.Query().Where(user.HasPets()).AllX(ctx), 1) + require.Len(client.User.Query().Where(user.HasSpouse()).AllX(ctx), 2) + require.Len(client.User.Query().Where(ent.Not(user.HasSpouse())).AllX(ctx), 1) + require.Len(client.User.Query().Where(user.HasGroups()).AllX(ctx), 2) + require.Len(client.Group.Query().Where(group.HasUsers()).AllX(ctx), 1) + require.Len(client.Group.Query().Where(group.HasUsersWith(user.Name("foo"))).AllX(ctx), 1) + require.Len(client.User.Query().Where(user.HasGroupsWith(group.NameHasPrefix("G"))).AllX(ctx), 2) + require.Equal(3, client.User.Query().CountX(ctx)) + require.Equal(client.Group.Query().Where(group.HasUsersWith(user.Name("foo"))).CountX(ctx), 1) + require.True(client.User.Query().ExistX(ctx)) + require.True(client.User.Query().Where(user.HasPetsWith(pet.NameHasPrefix("ped"))).ExistX(ctx)) + require.False(client.User.Query().Where(user.HasPetsWith(pet.NameHasPrefix("pan"))).ExistX(ctx)) + require.Equal(child.Name, client.User.Query().Order(ent.Asc("name")).FirstX(ctx).Name) + require.Equal(usr2.Name, client.User.Query().Order(ent.Desc("name")).FirstX(ctx).Name) + // update fields. + client.User.Update().Where(user.ID(child.ID)).SetName("Ariel").SaveX(ctx) + client.User.Query().Where(user.Name("Ariel")).OnlyX(ctx) + // update edges. 
+ require.Empty(child.QueryPets().AllX(ctx)) + require.NoError(client.Pet.UpdateOne(pt).ClearOwner().Exec(ctx)) + client.User.Update().Where(user.ID(child.ID)).AddPets(pt).SaveX(ctx) + require.NotEmpty(child.QueryPets().AllX(ctx)) + client.User.Update().Where(user.ID(child.ID)).RemovePets(pt).SaveX(ctx) + require.Empty(child.QueryPets().AllX(ctx)) + // remove edges. + client.User.Update().ClearSpouse().SaveX(ctx) + require.Empty(client.User.Query().Where(user.HasSpouse()).AllX(ctx)) + client.User.Update().AddFriends(child).RemoveGroups(grp).Where(user.ID(usr.ID)).SaveX(ctx) + require.NotEmpty(child.QueryGroups().AllX(ctx)) + require.Empty(usr.QueryGroups().AllX(ctx)) + require.Len(child.QueryFriends().AllX(ctx), 1) + require.Len(usr.QueryFriends().AllX(ctx), 1) + // update one vertex. + usr = client.User.UpdateOne(usr).SetName("baz").AddGroups(grp).SaveX(ctx) + require.Equal("baz", usr.Name) + require.NotEmpty(usr.QueryGroups().AllX(ctx)) + // grouping. + var v []struct { + Name string `json:"name"` + Age int `json:"age"` + Sum int `json:"sum"` + Count int `json:"count"` + } + client.User.Query(). + GroupBy(user.FieldName, user.FieldAge). + Aggregate(ent.Count(), ent.Sum(user.FieldAge)). + ScanX(ctx, &v) + require.NotEmpty(v) + // IN predicates. + ids := client.User.Query().IDsX(ctx) + require.Len(ids, 3) + client.User.Delete().Where(user.IDIn(ids...)).ExecX(ctx) + ids = client.User.Query().IDsX(ctx) + require.Empty(ids) + // nop. + client.User.Delete().Where(user.IDIn(ids...)).ExecX(ctx) +} + +func Relation(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + t.Log("querying group info") + info, err := client.GroupInfo. + Query(). + First(ctx) + require.Nil(info) + require.True(ent.IsNotFound(err)) + + t.Log("creating group info") + info = client.GroupInfo. + Create(). + SetDesc("group info"). + SaveX(ctx) + t.Logf("group info created: %v", info) + + t.Log("creating group") + grp := client.Group. + Create(). 
+ SetInfo(info). + SetName("Github"). + SetExpire(time.Now().Add(time.Hour)). + SaveX(ctx) + require.NotZero(grp.ID) + require.Equal(grp.MaxUsers, 10) + require.Equal(grp.Name, "Github") + t.Logf("group created: %v", grp) + + t.Log("creating user") + usr := client.User. + Create(). + SetAge(20). + SetName("a8m"). + AddGroups(grp). + SaveX(ctx) + require.NotZero(usr.ID) + require.Equal(usr.Age, 20) + require.Equal(usr.Name, "a8m") + require.Equal(usr.Last, "unknown") + t.Logf("user created: %v", usr) + + t.Log("querying assoc edges") + groups := usr.QueryGroups().IDsX(ctx) + require.NotEmpty(groups) + require.Equal(grp.ID, groups[0]) + t.Log("querying inverse edge") + users := grp.QueryUsers().IDsX(ctx) + require.NotEmpty(users) + require.Equal(usr.ID, users[0]) + + t.Log("remove group edge") + client.User.UpdateOne(usr).RemoveGroups(grp).ExecX(ctx) + require.Empty(grp.QueryUsers().AllX(ctx)) + require.Empty(usr.QueryGroups().AllX(ctx)) + t.Logf("add group edge") + client.User.UpdateOne(usr).AddGroups(grp).ExecX(ctx) + require.NotEmpty(grp.QueryUsers().AllX(ctx)) + require.NotEmpty(usr.QueryGroups().AllX(ctx)) + t.Log("remove users inverse edge") + client.Group.UpdateOne(grp).RemoveUsers(usr).ExecX(ctx) + require.Empty(grp.QueryUsers().AllX(ctx)) + require.Empty(usr.QueryGroups().AllX(ctx)) + t.Logf("add group inverse edge") + client.Group.UpdateOne(grp).AddUsers(usr).ExecX(ctx) + require.NotEmpty(grp.QueryUsers().AllX(ctx)) + require.NotEmpty(usr.QueryGroups().AllX(ctx)) + + t.Log("count vertices") + require.Equal(1, client.User.Query().CountX(ctx)) + require.Equal(1, client.Group.Query().CountX(ctx)) + + t.Log("get only vertices") + require.NotNil(client.User.Query().OnlyX(ctx)) + require.NotNil(client.Group.Query().OnlyX(ctx)) + + t.Log("get only ids") + require.NotEmpty(client.User.Query().OnlyXID(ctx)) + require.NotEmpty(client.Group.Query().OnlyXID(ctx)) + + t.Log("query spouse edge") + require.Zero(client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + 
neta := client.User.Create().SetName("neta").SetAge(18).SetSpouse(usr).SaveX(ctx) + require.Equal(2, client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + + t.Log("check for singular error") + _, err = client.User.Query().Only(ctx) + require.True(ent.IsNotSingular(err)) + + t.Log("query parent/children edges") + require.False(usr.QueryParent().ExistX(ctx)) + require.Empty(usr.QueryChildren().AllX(ctx)) + child := client.User.Create().SetName("pedro").SetAge(7).SetParent(usr).SaveX(ctx) + require.Equal(usr.Name, child.QueryParent().OnlyX(ctx).Name) + require.Equal(child.Name, usr.QueryChildren().OnlyX(ctx).Name) + require.False(usr.QueryParent().ExistX(ctx)) + + t.Log("clear parent edge") + brat := client.User.Create().SetName("brat").SetAge(19).SetParent(usr).SaveX(ctx) + require.Equal(2, usr.QueryChildren().CountX(ctx)) + brat = client.User.UpdateOne(brat).ClearParent().SaveX(ctx) + _, err = client.User.UpdateOne(brat).ClearParent().Save(ctx) + require.NoError(err) + require.False(brat.QueryParent().ExistX(ctx)) + require.Equal(1, usr.QueryChildren().CountX(ctx)) + + t.Log("delete child clears edge") + brat = client.User.UpdateOne(brat).SetParent(usr).SaveX(ctx) + require.Equal(2, usr.QueryChildren().CountX(ctx)) + client.User.DeleteOne(brat).ExecX(ctx) + require.Equal(1, usr.QueryChildren().CountX(ctx)) + + client.Group.UpdateOne(grp).AddBlocked(neta).SaveX(ctx) + blocked := usr.QueryGroups().OnlyX(ctx).QueryBlocked().OnlyX(ctx) + t.Log("blocked:", blocked) + + t.Log("query users with or condition") + require.Len(client.User.Query().Where(ent.Or(user.Name("a8m"), user.Name("neta"))).AllX(ctx), 2) + require.Len(client.User.Query().Where(ent.Or(user.Name("a8m"), user.Name("noam"))).AllX(ctx), 1) + require.Zero(client.User.Query().Where(ent.Or(user.Name("alex"), user.Name("noam"))).AllX(ctx)) + + t.Log("query using the in predicate") + require.Len(client.User.Query().Where(user.NameIn("a8m", "neta")).AllX(ctx), 2) + 
require.Len(client.User.Query().Where(user.NameIn("a8m", "alex")).AllX(ctx), 1) + require.Len(client.User.Query().Where(user.IDIn(neta.ID)).AllX(ctx), 1) + + t.Log("query existence") + require.True(client.User.Query().Where(user.Name("a8m")).Exist(ctx)) + require.False(client.User.Query().Where(user.Name("alex")).Exist(ctx)) + + t.Log("query using get") + require.Equal(usr.Name, client.User.Query().GetX(ctx, usr.ID).Name) + uid, err := client.User.Query().Where(ent.Not(user.Name(usr.Name))).Get(ctx, usr.ID) + require.Error(err) + require.Nil(uid) + + t.Log("test validators") + _, err = client.Group.Create().SetInfo(info).SetType("a").SetName("Gituhb").SetExpire(time.Now().Add(time.Hour)).Save(ctx) + require.Error(err, "type validator failed") + _, err = client.Group.Create().SetInfo(info).SetType("pass").SetName("failed").SetExpire(time.Now().Add(time.Hour)).Save(ctx) + require.Error(err, "name validator failed") + _, err = client.Group.Create().SetInfo(info).SetType("pass").SetName("Github20").SetExpire(time.Now().Add(time.Hour)).Save(ctx) + require.Error(err, "name validator failed") + _, err = client.Group.Create().SetInfo(info).SetType("pass").SetName("Github").SetMaxUsers(-1).SetExpire(time.Now().Add(time.Hour)).Save(ctx) + require.Error(err, "max_users validator failed") + _, err = client.Group.Update().SetMaxUsers(-10).Save(ctx) + require.Error(err, "max_users validator failed") + _, err = client.Group.UpdateOne(grp).SetMaxUsers(-10).Save(ctx) + require.Error(err, "max_users validator failed") + + t.Log("query using edge-with predicate") + require.Len(usr.QueryGroups().Where(group.HasInfoWith(groupinfo.Desc("group info"))).AllX(ctx), 1) + require.Empty(usr.QueryGroups().Where(group.HasInfoWith(groupinfo.Desc("missing info"))).AllX(ctx)) + t.Log("query using edge-with predicate on inverse edges") + require.Len(client.Group.Query().Where(group.Name("Github"), group.HasUsersWith(user.Name("a8m"))).AllX(ctx), 1) + 
require.Empty(client.Group.Query().Where(group.Name("Github"), group.HasUsersWith(user.Name("alex"))).AllX(ctx)) + t.Logf("query path using edge-with predicate") + require.Len(client.GroupInfo.Query().Where(groupinfo.HasGroupsWith(group.HasUsersWith(user.Name("a8m")))).AllX(ctx), 1) + require.Empty(client.GroupInfo.Query().Where(groupinfo.HasGroupsWith(group.HasUsersWith(user.Name("alex")))).AllX(ctx)) + require.Len(client.GroupInfo.Query().Where(ent.Or(groupinfo.Desc("group info"), groupinfo.HasGroupsWith(group.HasUsersWith(user.Name("alex"))))).AllX(ctx), 1) + + t.Log("query with ordering") + u1 := client.User.Query().Order(ent.Asc(user.FieldName)).FirstXID(ctx) + u2 := client.User.Query().Order(ent.Desc(user.FieldName)).FirstXID(ctx) + require.NotEqual(u1, u2) + u1 = client.User.Query().Order(ent.Asc(user.FieldLast), ent.Asc(user.FieldAge)).FirstXID(ctx) + u2 = client.User.Query().Order(ent.Asc(user.FieldLast), ent.Desc(user.FieldAge)).FirstXID(ctx) + require.NotEqual(u1, u2) + u1 = client.User.Query().Order(ent.Asc(user.FieldName, user.FieldAge)).FirstXID(ctx) + u2 = client.User.Query().Order(ent.Asc(user.FieldName, user.FieldAge)).FirstXID(ctx) + require.Equal(u1, u2) + + t.Log("query path") + require.Len(client.Group.Query().QueryUsers().AllX(ctx), 1) + require.Empty(client.Group.Query().Where(group.Name("boring")).QueryUsers().AllX(ctx)) + require.Equal(neta.Name, usr.QueryGroups().Where(group.Name("Github")).QueryUsers().QuerySpouse().OnlyX(ctx).Name) + require.Empty(client.GroupInfo.Query().Where(groupinfo.Desc("group info")).QueryGroups().Where(group.Name("boring")).AllX(ctx)) + require.Equal(child.Name, client.GroupInfo.Query().Where(groupinfo.Desc("group info")).QueryGroups().Where(group.Name("Github")).QueryUsers().QueryChildren().FirstX(ctx).Name) + + t.Log("query using string predicate") + require.Len(client.User.Query().Where(user.NameIn("a8m", "neta", "pedro")).AllX(ctx), 3) + require.Empty(client.User.Query().Where(user.NameNotIn("a8m", "neta", 
"pedro")).AllX(ctx)) + require.Empty(client.User.Query().Where(user.NameIn("alex", "rocket")).AllX(ctx)) + require.NotNil(client.User.Query().Where(user.HasParentWith(user.NameIn("a8m", "neta"))).OnlyX(ctx)) + require.Len(client.User.Query().Where(user.NameContains("a8")).AllX(ctx), 1) + require.Len(client.User.Query().Where(user.NameHasPrefix("a8")).AllX(ctx), 1) + require.Len(client.User.Query().Where(ent.Or(user.NameHasPrefix("a8"), user.NameHasSuffix("eta"))).AllX(ctx), 2) + + t.Log("group-by one field") + names, err := client.User.Query().GroupBy(user.FieldName).Strings(ctx) + require.NoError(err) + sort.Strings(names) + require.Equal([]string{"a8m", "neta", "pedro"}, names) + ages, err := client.User.Query().GroupBy(user.FieldAge).Ints(ctx) + require.NoError(err) + require.Len(ages, 3) + + t.Log("group-by two fields with aggregation") + client.User.Create().SetName(usr.Name).SetAge(usr.Age).SaveX(ctx) + client.User.Create().SetName(neta.Name).SetAge(neta.Age).SaveX(ctx) + child2 := client.User.Create().SetName(child.Name).SetAge(child.Age + 1).SaveX(ctx) + var v []struct { + Name string `json:"name"` + Age int `json:"age"` + Sum int `json:"sum"` + Count int `json:"count"` + } + client.User.Query(). + GroupBy(user.FieldName, user.FieldAge). + Aggregate(ent.Count(), ent.Sum(user.FieldAge)). 
+ ScanX(ctx, &v) + require.Len(v, 4) + sort.Slice(v, func(i, j int) bool { + if v[i].Name != v[j].Name { + return v[i].Name < v[j].Name + } + return v[i].Age < v[j].Age + }) + for i, usr := range []*ent.User{usr, neta} { + require.Equal(usr.Name, v[i].Name) + require.Equal(usr.Age, v[i].Age) + require.Equal(usr.Age*2, v[i].Sum) + require.Equal(2, v[i].Count, "should have 2 vertices") + } + v = v[2:] + for i, usr := range []*ent.User{child, child2} { + require.Equal(usr.Name, v[i].Name) + require.Equal(usr.Age, v[i].Age) + require.Equal(usr.Age, v[i].Sum) + require.Equal(1, v[i].Count) + } + + t.Log("group by with .as modulator") + var v2 []struct { + Name string `json:"name"` + Total int `json:"total"` + } + client.User.Query().GroupBy(user.FieldName).Aggregate(ent.As(ent.Count(), "total")).ScanX(ctx, &v2) + require.Len(v2, 3) + for i := range v2 { + require.Equal(2, v2[i].Total) + } +} +func UniqueConstraint(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("unique constraint violation on 1 field") + foo := client.User.Create().SetAge(1).SetName("foo").SetNickname("baz").SaveX(ctx) + _, err := client.User.Create().SetAge(1).SetName("bar").SetNickname("baz").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + bar := client.User.Create().SetAge(1).SetName("bar").SetNickname("bar").SetPhone("1").SaveX(ctx) + + t.Log("unique constraint violation on 2 fields") + _, err = client.User.Create().SetAge(1).SetName("baz").SetNickname("bar").SetPhone("1").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + _, err = client.User.Create().SetAge(1).SetName("baz").SetNickname("qux").SetPhone("1").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + _, err = client.User.Create().SetAge(1).SetName("baz").SetNickname("bar").SetPhone("2").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + client.User.Create().SetAge(1).SetName("baz").SetNickname("qux").SetPhone("2").SaveX(ctx) + _, err = 
client.User.UpdateOne(foo).SetNickname("bar").SetPhone("1").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + _, err = client.User.UpdateOne(foo).SetNickname("bar").SetPhone("2").Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + t.Log("o2o unique constraint on creation") + dan := client.User.Create().SetAge(1).SetName("dan").SetNickname("dan").SetSpouse(foo).SaveX(ctx) + require.Equal(dan.Name, foo.QuerySpouse().OnlyX(ctx).Name) + _, err = client.User.Create().SetAge(1).SetName("b").SetSpouse(foo).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + t.Log("o2m/m2o unique constraint on creation") + c1 := client.User.Create().SetAge(1).SetName("c1").SetNickname("c1").SetParent(foo).SaveX(ctx) + c2 := client.User.Create().SetAge(1).SetName("c2").SetNickname("c2").SetParent(foo).SaveX(ctx) + _, err = client.User.Create().SetAge(10).SetName("z").SetNickname("z").AddChildren(c1).Save(ctx) + require.True(ent.IsConstraintFailure(err), "c1 already has a parent") + _, err = client.User.Create().SetAge(10).SetName("z").SetNickname("z").AddChildren(c2).Save(ctx) + require.True(ent.IsConstraintFailure(err), "c2 already has a parent") + _, err = client.User.Create().SetAge(10).SetName("z").SetNickname("z").AddChildren(c1, c2).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + inf := client.GroupInfo.Create().SetDesc("desc").SaveX(ctx) + grp := client.Group.Create().SetName("Github").SetExpire(time.Now()).SetInfo(inf).SaveX(ctx) + _, err = client.GroupInfo.Create().SetDesc("desc").AddGroups(grp).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + p1 := client.Pet.Create().SetName("p1").SetOwner(foo).SaveX(ctx) + p2 := client.Pet.Create().SetName("p2").SetOwner(foo).SaveX(ctx) + _, err = client.User.Create().SetAge(10).SetName("new-owner").AddPets(p1, p2).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + err = client.User.UpdateOne(c2).SetNickname(c1.Nickname).Exec(ctx) + require.True(ent.IsConstraintFailure(err)) + + t.Log("o2o unique 
constraint on update") + err = client.User.UpdateOne(bar).SetSpouse(foo).Exec(ctx) + require.True(ent.IsConstraintFailure(err)) + err = client.User.UpdateOne(foo).SetSpouse(bar).Exec(ctx) + require.True(ent.IsConstraintFailure(err)) + client.User.UpdateOne(bar).ClearSpouse().ExecX(ctx) + client.User.UpdateOne(foo).ClearSpouse().SetSpouse(bar).ExecX(ctx) + require.False(dan.QuerySpouse().ExistX(ctx)) + require.Equal(bar.Name, foo.QuerySpouse().OnlyX(ctx).Name) + require.Equal(foo.Name, bar.QuerySpouse().OnlyX(ctx).Name) + + t.Log("o2m unique constraint on update") + _, err = client.User.UpdateOne(bar).SetAge(1).SetName("new-owner").AddPets(p1).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + _, err = client.User.UpdateOne(bar).SetAge(1).SetName("new-owner").AddPets(p1, p2).Save(ctx) + require.True(ent.IsConstraintFailure(err)) + + t.Log("unique constraint violation when updating more than 1 vertex") + err = client.User.Update().SetNickname("yada").Exec(ctx) + require.True(ent.IsConstraintFailure(err)) + require.False(client.User.Query().Where(user.Nickname("yada")).ExistX(ctx)) + client.User.Update().Where(user.Nickname("dan")).SetNickname("yada").ExecX(ctx) + require.False(client.User.Query().Where(user.Nickname("dan")).ExistX(ctx)) + require.True(client.User.Query().Where(user.Nickname("yada")).ExistX(ctx)) +} + +// Demonstrate a O2O relation between two different types. A User and a CreditCard. +// The user is the owner of the edge, named "owner", and the card has an inverse edge +// named "owner" that points to the User. 
+func O2OTwoTypes(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new user without card") + usr := client.User.Create().SetAge(10).SetName("foo").SaveX(ctx) + require.Zero(usr.QueryCard().CountX(ctx)) + + t.Log("add card to user on card creation (inverse creation)") + crd := client.Card.Create().SetNumber("1").SetOwner(usr).SaveX(ctx) + require.Equal(usr.QueryCard().CountX(ctx), 1) + require.Equal(crd.QueryOwner().CountX(ctx), 1) + + t.Log("delete inverse should delete association") + client.Card.DeleteOne(crd).ExecX(ctx) + require.Zero(client.Card.Query().CountX(ctx)) + require.Zero(usr.QueryCard().CountX(ctx), "user should not have card") + + t.Log("add card to user by updating user (the owner of the edge)") + crd = client.Card.Create().SetNumber("10").SaveX(ctx) + usr.Update().SetCard(crd).ExecX(ctx) + require.Equal(usr.Name, crd.QueryOwner().OnlyX(ctx).Name) + require.Equal(crd.Number, usr.QueryCard().OnlyX(ctx).Number) + + t.Log("delete assoc should delete inverse edge") + client.User.DeleteOne(usr).ExecX(ctx) + require.Zero(client.User.Query().CountX(ctx)) + require.Zero(crd.QueryOwner().CountX(ctx), "card should not have an owner") + + t.Log("add card to user by updating card (the inverse edge)") + usr = client.User.Create().SetAge(10).SetName("bar").SaveX(ctx) + crd.Update().SetOwner(usr).ExecX(ctx) + require.Equal(usr.Name, crd.QueryOwner().OnlyX(ctx).Name) + require.Equal(crd.Number, usr.QueryCard().OnlyX(ctx).Number) + + t.Log("query with side lookup on inverse") + ocrd := client.Card.Create().SetNumber("orphan card").SaveX(ctx) + require.Equal(crd.Number, client.Card.Query().Where(card.HasOwner()).OnlyX(ctx).Number) + require.Equal(ocrd.Number, client.Card.Query().Where(ent.Not(card.HasOwner())).OnlyX(ctx).Number) + + t.Log("query with side lookup on assoc") + ousr := client.User.Create().SetAge(10).SetName("user without card").SaveX(ctx) + require.Equal(usr.Name, 
client.User.Query().Where(user.HasCard()).OnlyX(ctx).Name) + require.Equal(ousr.Name, client.User.Query().Where(ent.Not(user.HasCard())).OnlyX(ctx).Name) + + t.Log("query with side lookup condition on inverse") + require.Equal(crd.Number, client.Card.Query().Where(card.HasOwnerWith(user.Name(usr.Name))).OnlyX(ctx).Number) + // has owner, but with name != "bar". + require.Zero(client.Card.Query().Where(card.HasOwnerWith(ent.Not(user.Name(usr.Name)))).CountX(ctx)) + // either has no owner, or has owner with name != "bar". + require.Equal( + ocrd.Number, + client.Card.Query(). + Where( + ent.Or( + // has no owner. + ent.Not(card.HasOwner()), + // has owner with name != "bar". + card.HasOwnerWith(ent.Not(user.Name(usr.Name))), + ), + ). + OnlyX(ctx).Number, + ) + + t.Log("query with side lookup condition on assoc") + require.Equal(usr.Name, client.User.Query().Where(user.HasCardWith(card.Number(crd.Number))).OnlyX(ctx).Name) + require.Zero(client.User.Query().Where(user.HasCardWith(ent.Not(card.Number(crd.Number)))).CountX(ctx)) + // either has no card, or has card with number != "10". + require.Equal( + ousr.Name, + client.User.Query(). + Where( + ent.Or( + // has no card. + ent.Not(user.HasCard()), + // has card with number != "10". + user.HasCardWith(ent.Not(card.Number(crd.Number))), + ), + ). + OnlyX(ctx).Name, + ) + + t.Log("query long path from inverse") + require.Equal(crd.Number, crd.QueryOwner().QueryCard().OnlyX(ctx).Number, "should get itself") + require.Equal(usr.Name, crd.QueryOwner().QueryCard().QueryOwner().OnlyX(ctx).Name, "should get its owner") + require.Equal( + usr.Name, + crd.QueryOwner(). + Where(user.HasCard()). + QueryCard(). + QueryOwner(). + Where(user.HasCard()). 
+ OnlyX(ctx).Name, + "should get its owner", + ) + + t.Log("query long path from assoc") + require.Equal(usr.Name, usr.QueryCard().QueryOwner().OnlyX(ctx).Name, "should get itself") + require.Equal(crd.Number, usr.QueryCard().QueryOwner().QueryCard().OnlyX(ctx).Number, "should get its card") + require.Equal( + crd.Number, + usr.QueryCard(). + Where(card.HasOwner()). + QueryOwner(). + Where(user.HasCard()). + QueryCard(). + OnlyX(ctx).Number, + "should get its card", + ) +} + +// Demonstrate a O2O relation between two instances of the same type. A linked-list +// nodes, where each node has an edge named "next" with inverse named "prev". +func O2OSameType(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("head of the list") + head := client.Node.Create().SetValue(1).SaveX(ctx) + require.Zero(head.QueryPrev().CountX(ctx)) + require.Zero(head.QueryNext().CountX(ctx)) + + t.Log("add node to the linked-list and connect it to the head (inverse creation)") + sec := client.Node.Create().SetValue(2).SetPrev(head).SaveX(ctx) + require.Zero(sec.QueryNext().CountX(ctx), "should not have next") + require.Equal(head.ID, sec.QueryPrev().OnlyX(ctx).ID, "head should point to the second node") + require.Equal(sec.ID, head.QueryNext().OnlyX(ctx).ID) + require.Equal(2, client.Node.Query().CountX(ctx), "linked-list should have 2 nodes") + + t.Log("delete inverse should delete association") + client.Node.DeleteOne(sec).ExecX(ctx) + require.Zero(head.QueryNext().CountX(ctx)) + require.Equal(1, client.Node.Query().CountX(ctx), "linked-list should have 1 node") + + t.Log("add node to the linked-list by updating the head (the owner of the edge)") + sec = client.Node.Create().SetValue(2).SaveX(ctx) + head.Update().SetNext(sec).ExecX(ctx) + require.Zero(sec.QueryNext().CountX(ctx), "should not have next") + require.Equal(head.ID, sec.QueryPrev().OnlyX(ctx).ID, "head should point to the second node") + require.Equal(sec.ID, 
head.QueryNext().OnlyX(ctx).ID) + require.Equal(2, client.Node.Query().CountX(ctx), "linked-list should have 2 nodes") + + t.Log("delete assoc should delete inverse edge") + client.Node.DeleteOne(head).ExecX(ctx) + require.Zero(sec.QueryPrev().CountX(ctx), "second node should be the head now") + require.Zero(sec.QueryNext().CountX(ctx), "second node should be the head now") + + t.Log("update second node value to be 1") + head = sec.Update().SetValue(1).SaveX(ctx) + require.Equal(1, head.Value) + + t.Log("create a linked-list 1->2->3->4->5") + nodes := []*ent.Node{head} + for i := 0; i < 4; i++ { + next := client.Node.Create().SetValue(nodes[i].Value + 1).SetPrev(nodes[i]).SaveX(ctx) + nodes = append(nodes, next) + } + require.Equal(len(nodes), client.Node.Query().CountX(ctx)) + + t.Log("check correctness of the list values") + for i, n := range nodes[:3] { + require.Equal(i+1, n.Value) + require.Equal(nodes[i+1].Value, n.QueryNext().OnlyX(ctx).Value) + } + require.Zero(nodes[len(nodes)-1].QueryNext().CountX(ctx), "last node should point to nil") + + t.Log("query with side lookup on inverse/assoc") + require.Equal(4, client.Node.Query().Where(node.HasNext()).CountX(ctx)) + require.Equal(4, client.Node.Query().Where(node.HasPrev()).CountX(ctx)) + + t.Log("make the linked-list to be circular") + nodes[len(nodes)-1].Update().SetNext(head).SaveX(ctx) + require.Equal(nodes[0].Value, nodes[len(nodes)-1].QueryNext().OnlyX(ctx).Value, "last node should point to head") + require.Equal(nodes[len(nodes)-1].Value, nodes[0].QueryPrev().OnlyX(ctx).Value, "head should have a reference to the tail") + + t.Log("query with side lookup on inverse/assoc") + require.Equal(5, client.Node.Query().Where(node.HasNext()).CountX(ctx)) + require.Equal(5, client.Node.Query().Where(node.HasPrev()).CountX(ctx)) + // node that points (with "next") to other node with value 2 (the head). 
+ require.Equal(nodes[0].Value, client.Node.Query().Where(node.HasNextWith(node.Value(2))).OnlyX(ctx).Value) + // node that points (with "next") to other node with value 1 (the tail). + require.Equal(nodes[len(nodes)-1].Value, client.Node.Query().Where(node.HasNextWith(node.Value(1))).OnlyX(ctx).Value) + // nodes that points to nodes with value greater than 2 (X->2->3->4->X). + values, err := client.Node.Query(). + Where(node.HasNextWith(node.ValueGT(2))). + Order(ent.Asc(node.FieldValue)). + GroupBy(node.FieldValue). + Ints(ctx) + require.NoError(err) + require.Equal([]int{2, 3, 4}, values) + + t.Log("query long path from inverse") + // going back from head to tail until we reach the head. + require.Equal( + head.Value, + head. + QueryPrev(). // 5 (tail) + QueryPrev(). // 4 + QueryPrev(). // 3 + QueryPrev(). // 2 + QueryPrev(). // 1 (head) + OnlyX(ctx).Value, + ) + // disrupt the query in the middle. + require.Zero(head.QueryPrev().QueryPrev().Where(node.ValueGT(10)).QueryPrev().QueryPrev().QueryPrev().CountX(ctx)) + + t.Log("query long path from assoc") + // going forward from head to next until we reach the head. + require.Equal( + head.Value, + head. + QueryNext(). // 2 + QueryNext(). // 3 + QueryNext(). // 4 + QueryNext(). // 5 (tail) + QueryNext(). // 1 (head) + OnlyX(ctx).Value, + ) + // disrupt the query in the middle. 
+ require.Zero(head.QueryNext().QueryNext().Where(node.ValueGT(10)).QueryNext().QueryNext().QueryNext().CountX(ctx)) + + t.Log("delete all nodes except the head") + client.Node.Delete().Where(node.ValueGT(1)).ExecX(ctx) + head = client.Node.Query().OnlyX(ctx) + + t.Log("node points to itself (circular linked-list with 1 node)") + head.Update().SetNext(head).SaveX(ctx) + require.Equal(head.ID, head.QueryPrev().OnlyXID(ctx)) + require.Equal(head.ID, head.QueryNext().OnlyXID(ctx)) + head.Update().ClearNext().SaveX(ctx) + require.Zero(head.QueryPrev().CountX(ctx)) + require.Zero(head.QueryNext().CountX(ctx)) +} + +// Demonstrate a O2O relation between two instances of the same type, where the relation +// has the same name in both directions. A couple. User A has "spouse" B (and vice versa). +// When setting B as a spouse of A, this sets A as spouse of B as well. In other words: +// +// foo := client.User.Create().SetName("foo").SaveX(ctx) +// bar := client.User.Create().SetName("bar").SetSpouse(foo).SaveX(ctx) +// count := client.User.Query.Where(user.HasSpouse()).CountX(ctx) +// // count will be 2, even though we've created only one relation above. 
+// +func O2OSelfRef(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new user without spouse") + foo := client.User.Create().SetAge(10).SetName("foo").SaveX(ctx) + require.False(foo.QuerySpouse().ExistX(ctx)) + + t.Log("sets spouse on user creation (inverse creation)") + bar := client.User.Create().SetAge(10).SetName("bar").SetSpouse(foo).SaveX(ctx) + require.True(foo.QuerySpouse().ExistX(ctx)) + require.True(bar.QuerySpouse().ExistX(ctx)) + require.Equal(2, client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + + t.Log("delete inverse should delete association") + client.User.DeleteOne(bar).ExecX(ctx) + require.False(foo.QuerySpouse().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + + t.Log("add spouse to user by updating a user") + bar = client.User.Create().SetAge(10).SetName("bar").SaveX(ctx) + foo.Update().SetSpouse(bar).ExecX(ctx) + require.True(foo.QuerySpouse().ExistX(ctx)) + require.True(bar.QuerySpouse().ExistX(ctx)) + require.Equal(2, client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + + t.Log("remove a spouse using update") + foo.Update().ClearSpouse().ExecX(ctx) + require.False(foo.QuerySpouse().ExistX(ctx)) + require.False(bar.QuerySpouse().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + // return back the spouse. + foo.Update().SetSpouse(bar).ExecX(ctx) + + t.Log("create a user without spouse") + baz := client.User.Create().SetAge(10).SetName("baz").SaveX(ctx) + require.False(baz.QuerySpouse().ExistX(ctx)) + require.Equal(2, client.User.Query().Where(user.HasSpouse()).CountX(ctx)) + + t.Log("set a new spouse") + foo.Update().ClearSpouse().SetSpouse(baz).ExecX(ctx) + require.True(foo.QuerySpouse().ExistX(ctx)) + require.True(baz.QuerySpouse().ExistX(ctx)) + require.False(bar.QuerySpouse().ExistX(ctx)) + // return back the spouse. 
+ foo.Update().ClearSpouse().SetSpouse(bar).ExecX(ctx) + + t.Log("spouse is a unique edge") + require.Error(baz.Update().SetSpouse(bar).Exec(ctx)) + require.Error(baz.Update().SetSpouse(foo).Exec(ctx)) + + t.Log("query with side lookup") + require.Equal( + bar.Name, + client.User.Query(). + Where(user.HasSpouseWith(user.Name("foo"))). + OnlyX(ctx).Name, + ) + require.Equal( + foo.Name, + client.User.Query(). + Where(user.HasSpouseWith(user.Name("bar"))). + OnlyX(ctx).Name, + ) + require.Equal( + baz.Name, + client.User.Query(). + Where(ent.Not(user.HasSpouse())). + OnlyX(ctx).Name, + ) + // has spouse that has a spouse with name "foo" (which actually means itself). + require.Equal( + foo.Name, + client.User.Query(). + Where(user.HasSpouseWith(user.HasSpouseWith(user.Name("foo")))). + OnlyX(ctx).Name, + ) + // has spouse that has a spouse with name "bar" (which actually means itself). + require.Equal( + bar.Name, + client.User.Query(). + Where(user.HasSpouseWith(user.HasSpouseWith(user.Name("bar")))). + OnlyX(ctx).Name, + ) + + t.Log("query path from a user") + require.Equal( + foo.Name, + foo. + QuerySpouse(). // bar + QuerySpouse(). // foo + QuerySpouse(). // bar + QuerySpouse(). // foo + OnlyX(ctx).Name, + ) + require.Equal( + bar.Name, + bar. + QuerySpouse(). // foo + QuerySpouse(). // bar + QuerySpouse(). // foo + QuerySpouse(). // bar + OnlyX(ctx).Name, + ) + + t.Log("query path from client") + require.Equal( + bar.Name, + client.User. + Query(). + Where(user.Name("foo")). // foo + QuerySpouse(). // bar + OnlyX(ctx).Name, + ) + require.Equal( + bar.Name, + client.User. + Query(). + Where(user.Name("bar")). // bar + QuerySpouse(). // foo + QuerySpouse(). // bar + OnlyX(ctx).Name, + ) +} + +// Demonstrate a O2M/M2O relation between two different types. A User and its Pets. +// The User type is the "owner" of the edge (assoc), and the Pet as an inverse edge to +// its owner. User can have one or more Pets, and Pet have only one owner (not required). 
+func O2MTwoTypes(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new user without pet") + usr := client.User.Create().SetAge(30).SetName("a8m").SaveX(ctx) + require.False(usr.QueryPets().ExistX(ctx)) + + t.Log("add pet to user on pet creation (inverse creation)") + pedro := client.Pet.Create().SetName("pedro").SetOwner(usr).SaveX(ctx) + require.Equal(usr.Name, pedro.QueryOwner().OnlyX(ctx).Name) + require.Equal(pedro.Name, usr.QueryPets().OnlyX(ctx).Name) + + t.Log("delete inverse should delete association") + client.Pet.DeleteOne(pedro).ExecX(ctx) + require.Zero(client.Pet.Query().CountX(ctx)) + require.False(usr.QueryPets().ExistX(ctx), "user should not have pet") + + t.Log("add pet to user by updating user (the owner of the edge)") + pedro = client.Pet.Create().SetName("pedro").SaveX(ctx) + usr.Update().AddPets(pedro).ExecX(ctx) + require.Equal(usr.Name, pedro.QueryOwner().OnlyX(ctx).Name) + require.Equal(pedro.Name, usr.QueryPets().OnlyX(ctx).Name) + + t.Log("delete assoc (owner of the edge) should delete inverse edge") + client.User.DeleteOne(usr).ExecX(ctx) + require.Zero(client.User.Query().CountX(ctx)) + require.False(pedro.QueryOwner().ExistX(ctx), "pet should not have an owner") + + t.Log("add pet to user by updating pet (the inverse edge)") + usr = client.User.Create().SetAge(30).SetName("a8m").SaveX(ctx) + pedro.Update().SetOwner(usr).ExecX(ctx) + require.Equal(usr.Name, pedro.QueryOwner().OnlyX(ctx).Name) + require.Equal(pedro.Name, usr.QueryPets().OnlyX(ctx).Name) + + t.Log("add another pet to user") + xabi := client.Pet.Create().SetName("xabi").SetOwner(usr).SaveX(ctx) + require.Equal(2, usr.QueryPets().CountX(ctx)) + require.Equal(1, xabi.QueryOwner().CountX(ctx)) + require.Equal(1, pedro.QueryOwner().CountX(ctx)) + + t.Log("edge is unique on the inverse side") + _, err := client.User.Create().SetAge(30).SetName("alex").AddPets(pedro).Save(ctx) + require.Error(err, "pet already has an owner") 
+
+	t.Log("add multiple pets on creation")
+	p1 := client.Pet.Create().SetName("p1").SaveX(ctx)
+	p2 := client.Pet.Create().SetName("p2").SaveX(ctx)
+	usr2 := client.User.Create().SetAge(30).SetName("alex").AddPets(p1, p2).SaveX(ctx)
+	require.True(p1.QueryOwner().ExistX(ctx))
+	require.True(p2.QueryOwner().ExistX(ctx))
+	require.Equal(2, usr2.QueryPets().CountX(ctx))
+	// delete p1, p2.
+	client.Pet.Delete().Where(pet.IDIn(p1.ID, p2.ID)).ExecX(ctx)
+	require.Zero(usr2.QueryPets().CountX(ctx))
+
+	t.Log("change the owner of a pet")
+	xabi.Update().ClearOwner().SetOwner(usr2).ExecX(ctx)
+	require.Equal(1, usr.QueryPets().CountX(ctx))
+	require.Equal(1, usr2.QueryPets().CountX(ctx))
+	require.Equal(usr2.Name, xabi.QueryOwner().OnlyX(ctx).Name)
+
+	t.Log("query with side lookup on inverse")
+	opet := client.Pet.Create().SetName("orphan pet").SaveX(ctx)
+	require.Equal(opet.Name, client.Pet.Query().Where(ent.Not(pet.HasOwner())).OnlyX(ctx).Name)
+	require.Equal(2, client.Pet.Query().Where(pet.HasOwner()).CountX(ctx))
+
+	t.Log("query with side lookup on assoc")
+	require.Zero(client.User.Query().Where(ent.Not(user.HasPets())).CountX(ctx))
+	ousr := client.User.Create().SetAge(10).SetName("user without pet").SaveX(ctx)
+	require.Equal(2, client.User.Query().Where(user.HasPets()).CountX(ctx))
+	require.Equal(ousr.Name, client.User.Query().Where(ent.Not(user.HasPets())).OnlyX(ctx).Name)
+
+	t.Log("query with side lookup condition on inverse")
+	require.Equal(pedro.Name, client.Pet.Query().Where(pet.HasOwnerWith(user.Name(usr.Name))).OnlyX(ctx).Name)
+	// has owner, but with name != "a8m".
+	require.Equal(xabi.Name, client.Pet.Query().Where(pet.HasOwnerWith(ent.Not(user.Name(usr.Name)))).OnlyX(ctx).Name)
+	// either has no owner, or has owner with name != "alex" and name != "a8m".
+	require.Equal(
+		opet.Name,
+		client.Pet.Query().
+			Where(
+				ent.Or(
+					// has no owner.
+					ent.Not(pet.HasOwner()),
+					// has owner with name != "a8m" and name != "alex".
+ pet.HasOwnerWith( + ent.Not(user.Name(usr.Name)), + ent.Not(user.Name(usr2.Name)), + ), + ), + ). + OnlyX(ctx).Name, + ) + + t.Log("query with side lookup condition on assoc") + require.Equal(usr.Name, client.User.Query().Where(user.HasPetsWith(pet.Name(pedro.Name))).OnlyX(ctx).Name) + require.Equal(usr2.Name, client.User.Query().Where(user.HasPetsWith(pet.Name(xabi.Name))).OnlyX(ctx).Name) + require.Zero( + client.User.Query(). + Where( + user.HasPetsWith( + ent.Not(pet.Name(xabi.Name)), + ent.Not(pet.Name(pedro.Name)), + ), + ).CountX(ctx), + ) + // either has no pet, or has pet with name != "pedro" and name != "xabi". + require.Equal( + ousr.Name, + client.User.Query(). + Where( + ent.Or( + // has no pet. + ent.Not(user.HasPets()), + // has pet with name != "pedro" and name != "xabi". + user.HasPetsWith( + ent.Not(pet.Name(xabi.Name)), + ent.Not(pet.Name(pedro.Name)), + ), + ), + ). + OnlyX(ctx).Name, + ) + + t.Log("query long path from inverse") + require.Equal(pedro.Name, pedro.QueryOwner().QueryPets().OnlyX(ctx).Name, "should get itself") + require.Equal(usr.Name, pedro.QueryOwner().QueryPets().QueryOwner().OnlyX(ctx).Name, "should get its owner") + require.Equal( + usr.Name, + pedro.QueryOwner(). + Where(user.HasPets()). + QueryPets(). + QueryOwner(). + Where(user.HasPets()). + OnlyX(ctx).Name, + "should get its owner", + ) + + t.Log("query long path from assoc") + require.Equal(usr.Name, usr.QueryPets().QueryOwner().OnlyX(ctx).Name, "should get itself") + require.Equal(pedro.Name, usr.QueryPets().QueryOwner().QueryPets().OnlyX(ctx).Name, "should get its pet") + require.Equal( + pedro.Name, + usr.QueryPets(). + Where(pet.HasOwner()). // pedro + QueryOwner(). // + Where(user.HasPets()). // a8m + QueryPets(). // pedro + OnlyX(ctx).Name, + "should get its pet", + ) + require.Equal( + xabi.Name, + client.User.Query(). + // alex matches this query (not a8m, and have a pet). + Where( + ent.Not(user.Name(usr.Name)), + user.HasPets(), + ). + QueryPets(). 
// xabi + QueryOwner(). // alex + QueryPets(). // xabi + OnlyX(ctx).Name, + ) +} + +// Demonstrate a O2M/M2O relation between two instances of the same type. A "parent" and +// its children. User can have one or more children, but can have only one parent (unique inverse edge). +// Note that both edges are not required. +func O2MSameType(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new parent without children") + prt := client.User.Create().SetAge(30).SetName("a8m").SaveX(ctx) + require.Zero(prt.QueryChildren().CountX(ctx)) + + t.Log("add child to parent on child creation (inverse creation)") + chd := client.User.Create().SetAge(1).SetName("child").SetParent(prt).SaveX(ctx) + require.Equal(prt.Name, chd.QueryParent().OnlyX(ctx).Name) + require.Equal(chd.Name, prt.QueryChildren().OnlyX(ctx).Name) + + t.Log("delete inverse should delete association") + client.User.DeleteOne(chd).ExecX(ctx) + require.False(prt.QueryChildren().ExistX(ctx), "user should not have children") + + t.Log("add child to parent by updating user (the owner of the edge)") + chd = client.User.Create().SetAge(1).SetName("child").SaveX(ctx) + prt.Update().AddChildIDs(chd.ID).ExecX(ctx) + require.Equal(prt.Name, chd.QueryParent().OnlyX(ctx).Name) + require.Equal(chd.Name, prt.QueryChildren().OnlyX(ctx).Name) + + t.Log("delete assoc (owner of the edge) should delete inverse edge") + client.User.DeleteOne(prt).ExecX(ctx) + require.Equal(1, client.User.Query().CountX(ctx)) + require.False(chd.QueryParent().ExistX(ctx), "child should not have an owner") + + t.Log("add pet to user by updating pet (the inverse edge)") + prt = client.User.Create().SetAge(30).SetName("a8m").SaveX(ctx) + chd.Update().SetParent(prt).ExecX(ctx) + require.Equal(prt.Name, chd.QueryParent().OnlyX(ctx).Name) + require.Equal(chd.Name, prt.QueryChildren().OnlyX(ctx).Name) + require.Zero(prt.QueryParent().CountX(ctx), "parent is orphan") + 
require.Zero(chd.QueryChildren().CountX(ctx), "child should not have children")
+
+	t.Log("add another child to parent")
+	chd2 := client.User.Create().SetAge(1).SetName("child2").SetParent(prt).SaveX(ctx)
+	require.Equal(2, prt.QueryChildren().CountX(ctx))
+	require.Equal(1, chd.QueryParent().CountX(ctx))
+	require.Equal(1, chd2.QueryParent().CountX(ctx))
+
+	t.Log("edge is unique on the inverse side")
+	_, err := client.User.Create().SetAge(30).SetName("alex").AddChildren(chd).Save(ctx)
+	require.Error(err, "child already has parent")
+	_, err = client.User.Create().SetAge(30).SetName("alex").AddChildren(chd2).Save(ctx)
+	require.Error(err, "child already has parent")
+
+	t.Log("add multiple children on creation")
+	chd3 := client.User.Create().SetAge(1).SetName("child3").SaveX(ctx)
+	chd4 := client.User.Create().SetAge(1).SetName("child4").SaveX(ctx)
+	prt2 := client.User.Create().SetAge(30).SetName("alex").AddChildren(chd3, chd4).SaveX(ctx)
+	require.True(chd3.QueryParent().ExistX(ctx))
+	// BUG FIX: the original asserted chd3 twice and never verified chd4's parent edge.
+	require.True(chd4.QueryParent().ExistX(ctx))
+	require.Equal(2, prt2.QueryChildren().CountX(ctx))
+	// delete chd3, chd4.
+	client.User.Delete().Where(user.IDIn(chd3.ID, chd4.ID)).ExecX(ctx)
+	require.Zero(prt2.QueryChildren().CountX(ctx))
+
+	t.Log("change the parent of a child")
+	chd2.Update().ClearParent().SetParent(prt2).ExecX(ctx)
+	require.Equal(1, prt.QueryChildren().CountX(ctx))
+	require.Equal(1, prt2.QueryChildren().CountX(ctx))
+	require.Equal(chd2.Name, prt2.QueryChildren().OnlyX(ctx).Name)
+
+	t.Log("query with side lookup on inverse")
+	ochd := client.User.Create().SetAge(1).SetName("orphan user").SaveX(ctx)
+	require.Equal(3, client.User.Query().Where(ent.Not(user.HasParent())).CountX(ctx))
+	require.Equal(
+		ochd.Name,
+		client.User.Query().
+			Where(
+				ent.Not(user.HasParent()),
+				ent.Not(user.HasChildren()),
+			).
+			OnlyX(ctx).Name,
+		"3 orphan users, but only one does not have children",
+	)
+	require.Equal(2, client.User.Query().Where(user.HasParent()).CountX(ctx))
+
+	t.Log("query with side lookup on assoc")
+	require.Equal(2, client.User.Query().Where(user.HasChildren()).CountX(ctx))
+	require.Equal(3, client.User.Query().Where(ent.Not(user.HasChildren())).CountX(ctx))
+
+	t.Log("query with side lookup condition on inverse")
+	require.Equal(chd.Name, client.User.Query().Where(user.HasParentWith(user.Name(prt.Name))).OnlyX(ctx).Name)
+	// has parent, but with name != "a8m".
+	require.Equal(chd2.Name, client.User.Query().Where(user.HasParentWith(ent.Not(user.Name(prt.Name)))).OnlyX(ctx).Name)
+	// either has no parent, or has parent with name != "alex".
+	require.Equal(
+		4,
+		client.User.Query().
+			Where(
+				ent.Or(
+					// has no parent.
+					ent.Not(user.HasParent()),
+					// has parent with name != "alex".
+					user.HasParentWith(
+						ent.Not(user.Name(prt2.Name)),
+					),
+				),
+			).
+			CountX(ctx),
+		"should match chd, ochd, prt, prt2",
+	)
+	// either has no parent, or has parent with name != "a8m".
+	require.Equal(
+		4,
+		client.User.Query().
+			Where(
+				ent.Or(
+					// has no parent.
+					ent.Not(user.HasParent()),
+					// has parent with name != "a8m".
+					user.HasParentWith(
+						ent.Not(user.Name(prt.Name)),
+					),
+				),
+			).
+			CountX(ctx),
+		"should match chd2, ochd, prt, prt2",
+	)
+
+	t.Log("query with side lookup condition on assoc")
+	require.Equal(prt.Name, client.User.Query().Where(user.HasChildrenWith(user.Name(chd.Name))).OnlyX(ctx).Name)
+	require.Equal(prt2.Name, client.User.Query().Where(user.HasChildrenWith(user.Name(chd2.Name))).OnlyX(ctx).Name)
+	// parent with 2 children named: child and child2.
+	require.Zero(
+		client.User.Query().
+			Where(
+				// BUG FIX: the "children" edge points to User, so the predicates must come
+				// from the user package, not the pet package (the assertion stays zero).
+				user.HasChildrenWith(
+					user.Name(chd.Name),
+					user.Name(chd2.Name),
+				),
+			).
+			CountX(ctx),
+	)
+	// either has no children, or has 2 children: "child" and "child2".
+	require.Equal(
+		3,
+		client.User.Query().
+ Where( + ent.Or( + // has no children. + ent.Not(user.HasChildren()), + // has 2 children: "child" and "child2". + user.HasChildrenWith( + user.Name(chd.Name), + user.Name(chd2.Name), + ), + ), + ). + CountX(ctx), + "should match chd, chd2 and ochd", + ) + + t.Log("query long path from inverse") + require.Equal(chd.Name, chd.QueryParent().QueryChildren().OnlyX(ctx).Name, "should get itself") + require.Equal(prt.Name, chd.QueryParent().QueryChildren().QueryParent().OnlyX(ctx).Name, "should get its parent") + require.Equal( + prt.Name, + chd.QueryParent(). + Where(user.HasChildren()). + QueryChildren(). + QueryParent(). + Where(user.HasChildren()). + OnlyX(ctx).Name, + "should get its owner", + ) + + t.Log("query long path from assoc") + require.Equal(prt.Name, prt.QueryChildren().QueryParent().OnlyX(ctx).Name, "should get itself") + require.Equal(chd.Name, prt.QueryChildren().QueryParent().QueryChildren().OnlyX(ctx).Name, "should get its child") + require.Equal( + chd.Name, + prt.QueryChildren(). + Where(user.HasParent()). // child + QueryParent(). // + Where(user.HasChildren()). // parent + QueryChildren(). // child + OnlyX(ctx).Name, + "should get its child", + ) + require.Equal( + chd2.Name, + client.User.Query(). + // "alex" matches this query (not "a8m", and have a child). + Where( + ent.Not(user.Name(prt.Name)), + user.HasChildren(), + ). + QueryChildren(). // child + QueryParent(). // parent + QueryChildren(). // child + OnlyX(ctx).Name, + ) +} + +// Demonstrate a M2M relation between two instances of the same type, where the relation +// has the same name in both directions. A friendship between Users. +// User A has "friend" B (and vice versa). When setting B as a friend of A, this sets A +// as friend of B as well. 
In other words:
+//
+//	foo := client.User.Create().SetName("foo").SaveX(ctx)
+//	bar := client.User.Create().SetName("bar").AddFriends(foo).SaveX(ctx)
+//	count := client.User.Query().Where(user.HasFriends()).CountX(ctx)
+//	// count will be 2, even though we've created only one relation above.
+//
+func M2MSelfRef(t *testing.T, client *ent.Client) {
+	require := require.New(t)
+	ctx := context.Background()
+
+	t.Log("new user without friends")
+	foo := client.User.Create().SetAge(10).SetName("foo").SaveX(ctx)
+	require.False(foo.QueryFriends().ExistX(ctx))
+
+	t.Log("sets friendship on user creation (inverse creation)")
+	bar := client.User.Create().SetAge(10).SetName("bar").AddFriends(foo).SaveX(ctx)
+	require.True(foo.QueryFriends().ExistX(ctx))
+	require.True(bar.QueryFriends().ExistX(ctx))
+	require.Equal(2, client.User.Query().Where(user.HasFriends()).CountX(ctx))
+
+	t.Log("delete inverse should delete association")
+	client.User.DeleteOne(bar).ExecX(ctx)
+	require.False(foo.QueryFriends().ExistX(ctx))
+	require.Zero(client.User.Query().Where(user.HasFriends()).CountX(ctx))
+
+	t.Log("add friendship to user by updating existing users")
+	bar = client.User.Create().SetAge(10).SetName("bar").SaveX(ctx)
+	foo.Update().AddFriends(bar).ExecX(ctx)
+	require.True(foo.QueryFriends().ExistX(ctx))
+	require.True(bar.QueryFriends().ExistX(ctx))
+	require.Equal(2, client.User.Query().Where(user.HasFriends()).CountX(ctx))
+
+	t.Log("remove friendship using update")
+	foo.Update().RemoveFriends(bar).ExecX(ctx)
+	require.False(foo.QueryFriends().ExistX(ctx))
+	require.False(bar.QueryFriends().ExistX(ctx))
+	require.Zero(client.User.Query().Where(user.HasFriends()).CountX(ctx))
+	// return back the friendship.
+ foo.Update().AddFriends(bar).ExecX(ctx) + + t.Log("create a user without friends") + baz := client.User.Create().SetAge(10).SetName("baz").SaveX(ctx) + require.False(baz.QueryFriends().ExistX(ctx)) + require.Equal(2, client.User.Query().Where(user.HasFriends()).CountX(ctx)) + + t.Log("both baz and bar are friends of foo") + baz.Update().AddFriends(foo).ExecX(ctx) + require.Equal(2, foo.QueryFriends().CountX(ctx)) + require.Equal(foo.Name, bar.QueryFriends().OnlyX(ctx).Name) + require.Equal(foo.Name, baz.QueryFriends().OnlyX(ctx).Name) + require.Equal(3, client.User.Query().Where(user.HasFriends()).CountX(ctx)) + + t.Log("query with side lookup") + require.Equal( + []string{bar.Name, baz.Name}, + client.User.Query(). + Where(user.HasFriendsWith(user.Name(foo.Name))). + Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + ) + require.Equal( + foo.Name, + client.User.Query(). + Where(user.HasFriendsWith(user.Name(bar.Name))). + OnlyX(ctx).Name, + ) + require.Equal( + foo.Name, + client.User.Query(). + Where(ent.Not(user.HasFriendsWith(user.Name(foo.Name)))). + OnlyX(ctx).Name, + "foo does not have friendship with foo", + ) + require.Equal( + []string{bar.Name, baz.Name}, + client.User.Query(). + Where(ent.Not(user.HasFriendsWith(user.Name(baz.Name)))). + Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + "bar and baz do not have friendship with baz", + ) + + t.Log("query path from a user") + require.Equal( + foo.Name, + foo. + QueryFriends().Where(user.Name(bar.Name)). // bar + QueryFriends(). // foo + QueryFriends().Where(user.Name(baz.Name)). // baz + QueryFriends(). // foo + OnlyX(ctx).Name, + ) + require.Equal( + foo.Name, + foo. + QueryFriends(). // bar, baz + QueryFriends(). // foo + QueryFriends(). // bar, baz + QueryFriends(). // foo + OnlyX(ctx).Name, + ) + require.Equal( + baz.Name, + foo. + QueryFriends().Where(user.Name(bar.Name)). // bar + QueryFriends(). 
// foo + QueryFriends().Where(ent.Not(user.Name(bar.Name))). // baz + OnlyX(ctx).Name, + ) + + t.Log("query path from client") + require.Equal( + []string{bar.Name, baz.Name}, + client.User. + Query(). + Where(user.Name(foo.Name)). // foo + QueryFriends(). // bar, baz + Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + ) + require.Equal( + bar.Name, + client.User. + Query(). + // foo has a friend (bar) that does not have a friend named baz. + Where( + user.HasFriendsWith( + ent.Not( + user.HasFriendsWith(user.Name(baz.Name)), + ), + ), + ). + // bar and baz. + QueryFriends(). + // filter baz out. + Where(ent.Not(user.Name(baz.Name))). + OnlyX(ctx).Name, + ) +} + +// Demonstrate a M2M relation between two instances of the same type. +// Following and followers. +func M2MSameType(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new user without followers") + foo := client.User.Create().SetAge(10).SetName("foo").SaveX(ctx) + require.False(foo.QueryFollowers().ExistX(ctx)) + + t.Log("adds followers on user creation (inverse creation)") + bar := client.User.Create().SetAge(10).SetName("bar").AddFollowing(foo).SaveX(ctx) + require.Equal(foo.Name, bar.QueryFollowing().OnlyX(ctx).Name) + require.Equal(bar.Name, foo.QueryFollowers().OnlyX(ctx).Name) + require.Equal(1, client.User.Query().Where(user.HasFollowers()).CountX(ctx)) + require.Equal(1, client.User.Query().Where(user.HasFollowing()).CountX(ctx)) + + t.Log("delete inverse should delete association") + client.User.DeleteOne(bar).ExecX(ctx) + require.False(foo.QueryFollowers().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowers()).CountX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowing()).CountX(ctx)) + + t.Log("add followers to user by updating existing users") + bar = client.User.Create().SetAge(10).SetName("bar").SaveX(ctx) + foo.Update().AddFollowers(bar).ExecX(ctx) + require.Equal(foo.Name, 
bar.QueryFollowing().OnlyX(ctx).Name) + require.Equal(bar.Name, foo.QueryFollowers().OnlyX(ctx).Name) + require.Equal(1, client.User.Query().Where(user.HasFollowers()).CountX(ctx)) + require.Equal(1, client.User.Query().Where(user.HasFollowing()).CountX(ctx)) + + t.Log("remove following using update") + bar.Update().RemoveFollowing(foo).ExecX(ctx) + require.False(foo.QueryFollowers().ExistX(ctx)) + require.False(bar.QueryFollowing().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowing()).CountX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowers()).CountX(ctx)) + // follow back. + bar.Update().AddFollowing(foo).ExecX(ctx) + + t.Log("remove followers using update (inverse)") + foo.Update().RemoveFollowers(bar).ExecX(ctx) + require.False(foo.QueryFollowers().ExistX(ctx)) + require.False(bar.QueryFollowing().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowing()).CountX(ctx)) + require.Zero(client.User.Query().Where(user.HasFollowers()).CountX(ctx)) + // follow back. + bar.Update().AddFollowing(foo).ExecX(ctx) + + users := make([]*ent.User, 5) + for i := range users { + u := client.User.Create().SetAge(10).SetName(fmt.Sprintf("user-%d", i)).SaveX(ctx) + users[i] = u.Update().AddFollowing(foo, bar).SaveX(ctx) + require.Equal( + []string{bar.Name, foo.Name}, + u.QueryFollowing(). + Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + ) + } + require.Equal(5, bar.QueryFollowers().CountX(ctx), "users1..5") + require.Equal(6, foo.QueryFollowers().CountX(ctx), "users1..5 and bar") + require.Equal(2, client.User.Query().Where(user.HasFollowers()).CountX(ctx), "foo and bar") + require.Equal(6, client.User.Query().Where(user.HasFollowing()).CountX(ctx), "users1..5 and bar") + // compare followers. + require.Equal( + bar.QueryFollowers(). + Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + foo.QueryFollowers(). + Where(ent.Not(user.Name(bar.Name))). 
+ Order(ent.Asc(user.FieldName)). + GroupBy(user.FieldName). + StringsX(ctx), + "bar.followers = (foo.followers - bar)", + ) + + // delete users 1..5. + client.User.Delete().Where(user.NameHasPrefix("user")).ExecX(ctx) + require.Equal(2, client.User.Query().CountX(ctx)) + + t.Log("query with side lookup from inverse") + require.Equal(foo.Name, foo.QueryFollowers().QueryFollowing().OnlyX(ctx).Name, "should get itself") + require.Equal(bar.Name, foo.QueryFollowers().QueryFollowing().QueryFollowers().OnlyX(ctx).Name, "should get its follower (bar)") + + t.Log("query with side lookup from assoc") + require.Equal(bar.Name, bar.QueryFollowing().QueryFollowers().OnlyX(ctx).Name, "should get itself") + require.Equal(foo.Name, bar.QueryFollowing().QueryFollowers().QueryFollowing().OnlyX(ctx).Name, "should get foo") + + // generate additional users and make sure we don't get them in the queries below. + client.User.Create().SetAge(10).SetName("baz").SaveX(ctx) + client.User.Create().SetAge(10).SetName("qux").SaveX(ctx) + + t.Log("query path from a user") + require.Equal( + bar.Name, + foo. + QueryFollowers().Where(user.Name(bar.Name)). // bar + QueryFollowing().Where(user.HasFollowers()). // foo + QueryFollowers(). // bar + Where( + user.HasFollowingWith( + user.Name(foo.Name), + ), + ). + OnlyX(ctx).Name, + ) + + t.Log("query path from client") + require.Equal( + foo.Name, + client.User. + Query().Where(user.Name(foo.Name)). // foo + QueryFollowers().Where(user.Name(bar.Name)). // bar + QueryFollowing().Where(user.HasFollowers()). // foo + QueryFollowers(). // bar + Where( + user.HasFollowingWith( + user.Name(foo.Name), + ), + ). + // has followers named bar (foo). + QueryFollowing(). + Where( + user.HasFollowersWith( + user.Name(bar.Name), + ), + ). + OnlyX(ctx).Name, + ) +} + +// Demonstrate a M2M relation between two different types. User and groups. 
+func M2MTwoTypes(t *testing.T, client *ent.Client) { + require := require.New(t) + ctx := context.Background() + + t.Log("new user without groups") + foo := client.User.Create().SetAge(10).SetName("foo").SaveX(ctx) + require.False(foo.QueryGroups().ExistX(ctx)) + require.Zero(client.Group.Query().CountX(ctx)) + + t.Log("adds users to group on group creation (inverse creation)") + // group-info is required edge. + inf := client.GroupInfo.Create().SetDesc("desc").SaveX(ctx) + hub := client.Group.Create().SetName("Github").SetExpire(time.Now()).AddUsers(foo).SetInfo(inf).SaveX(ctx) + require.Equal(foo.Name, hub.QueryUsers().OnlyX(ctx).Name, "group has only one user") + require.Equal(hub.Name, foo.QueryGroups().OnlyX(ctx).Name, "user is connected to one group") + require.Equal(1, client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Equal(1, client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + + t.Log("delete inverse should delete association") + client.Group.DeleteOne(hub).ExecX(ctx) + require.False(foo.QueryGroups().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Zero(client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + + t.Log("add user to groups updating existing users") + hub = client.Group.Create().SetName("Github").SetExpire(time.Now()).SetInfo(inf).SaveX(ctx) + require.False(foo.QueryGroups().ExistX(ctx)) + foo.Update().AddGroups(hub).ExecX(ctx) + require.Equal(foo.Name, hub.QueryUsers().OnlyX(ctx).Name, "group has only one user") + require.Equal(hub.Name, foo.QueryGroups().OnlyX(ctx).Name, "user is connected to one group") + require.Equal(1, client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Equal(1, client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + + t.Log("delete assoc should delete inverse as well") + client.User.DeleteOne(foo).ExecX(ctx) + require.False(hub.QueryUsers().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasGroups()).CountX(ctx)) + 
require.Zero(client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + // add back the user. + foo = client.User.Create().SetAge(10).SetName("foo").AddGroups(hub).SaveX(ctx) + + t.Log("remove following using update (assoc)") + foo.Update().RemoveGroups(hub).ExecX(ctx) + require.False(foo.QueryGroups().ExistX(ctx)) + require.False(hub.QueryUsers().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Zero(client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + // join back to group. + foo.Update().AddGroups(hub).ExecX(ctx) + + t.Log("remove following using update (inverse)") + hub.Update().RemoveUsers(foo).ExecX(ctx) + require.False(foo.QueryGroups().ExistX(ctx)) + require.False(hub.QueryUsers().ExistX(ctx)) + require.Zero(client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Zero(client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + // add back the user. + hub.Update().AddUsers(foo).ExecX(ctx) + + t.Log("multiple groups and users") + lab := client.Group.Create().SetName("Gitlab").SetExpire(time.Now()).SetInfo(inf).SaveX(ctx) + bar := client.User.Create().SetAge(10).SetName("bar").SaveX(ctx) + require.Equal(1, client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Equal(1, client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + bar.Update().AddGroups(lab).ExecX(ctx) + require.Equal(2, client.User.Query().Where(user.HasGroups()).CountX(ctx)) + require.Equal(2, client.Group.Query().Where(group.HasUsers()).CountX(ctx)) + // validate relations. + require.Equal(foo.Name, hub.QueryUsers().OnlyX(ctx).Name, "hub has only one user") + require.Equal(hub.Name, foo.QueryGroups().OnlyX(ctx).Name, "foo is connected only to hub") + require.Equal(bar.Name, lab.QueryUsers().OnlyX(ctx).Name, "lab has only one user") + require.Equal(lab.Name, bar.QueryGroups().OnlyX(ctx).Name, "bar is connected only to lab") + // add bar to hub. 
+	bar.Update().AddGroups(hub).ExecX(ctx)
+	require.Equal(2, hub.QueryUsers().CountX(ctx))
+	require.Equal(1, lab.QueryUsers().CountX(ctx))
+	require.Equal([]string{bar.Name, foo.Name}, hub.QueryUsers().Order(ent.Asc(user.FieldName)).GroupBy(user.FieldName).StringsX(ctx))
+	// FIX: this query is over Group entities, so order/group by the group package's
+	// field constant, not the user package's (both happen to be "name", but mixing
+	// packages is misleading and breaks if either field is ever renamed).
+	require.Equal([]string{hub.Name, lab.Name}, bar.QueryGroups().Order(ent.Asc(group.FieldName)).GroupBy(group.FieldName).StringsX(ctx))
+
+	t.Log("query with side lookup from inverse")
+	require.Equal(hub.Name, hub.QueryUsers().QueryGroups().Where(group.Name(hub.Name)).OnlyX(ctx).Name, "should get itself")
+	require.Equal(bar.Name, lab.QueryUsers().QueryGroups().Where(ent.Not(group.Name(hub.Name))).QueryUsers().OnlyX(ctx).Name, "should get its user")
+
+	t.Log("query with side lookup from assoc")
+	require.Equal(bar.Name, bar.QueryGroups().Where(group.Name(lab.Name)).QueryUsers().OnlyX(ctx).Name, "should get itself")
+	require.Equal(lab.Name, bar.QueryGroups().Where(group.Name(lab.Name)).QueryUsers().QueryGroups().Where(group.Name(lab.Name)).OnlyX(ctx).Name, "should get its group")
+
+	t.Log("query path from a user")
+	require.Equal(
+		hub.Name,
+		bar.
+			// hub.
+			QueryGroups().
+			Where(
+				group.HasUsersWith(user.Name(foo.Name)),
+			).
+			// foo (not having group with name "lab").
+			QueryUsers().
+			Where(
+				ent.Not(
+					user.HasGroupsWith(group.Name(lab.Name)),
+				),
+			).
+			// hub.
+			QueryGroups().
+			OnlyX(ctx).Name,
+	)
+
+	t.Log("query path from a client")
+	require.Equal(
+		bar.Name,
+		client.Group.
+			// hub.
+			Query().
+			Where(
+				group.HasUsersWith(user.Name(foo.Name)),
+			).
+			// foo (not having group with name "lab").
+			QueryUsers().
+			Where(
+				ent.Not(
+					user.HasGroupsWith(group.Name(lab.Name)),
+				),
+			).
+			// hub.
+			QueryGroups().
+			// bar, foo.
+			QueryUsers().
+			Order(ent.Asc(user.FieldName)).
+ // bar + FirstX(ctx).Name, + ) + +} + +func Tx(t *testing.T, client *ent.Client) { + ctx := context.Background() + require := require.New(t) + + tx, err := client.Tx(ctx) + require.NoError(err) + + tx.Node.Create().SaveX(ctx) + + require.NoError(tx.Rollback()) + require.Zero(client.Node.Query().CountX(ctx), "rollback should discard all changes") + + tx, err = client.Tx(ctx) + require.NoError(err) + + nde := tx.Node.Create().SaveX(ctx) + + require.NoError(tx.Commit()) + require.Error(tx.Commit(), "should return an error on the second call") + require.NotZero(client.Node.Query().CountX(ctx), "commit should save all changes") + _, err = nde.QueryNext().Count(ctx) + require.Error(err, "should not be able to query after tx was closed") + require.Zero(nde.Unwrap().QueryNext().CountX(ctx), "should be able to query the entity after wrap") + + tx, err = client.Tx(ctx) + require.NoError(err) + _, err = tx.Client().Tx(ctx) + require.Error(err, "cannot start a transaction within a transaction") + require.NoError(tx.Rollback()) +} + +func drop(t *testing.T, client *ent.Client) { + t.Log("drop data from database") + ctx := context.Background() + client.Pet.Delete().ExecX(ctx) + client.Card.Delete().ExecX(ctx) + client.Node.Delete().ExecX(ctx) + client.User.Delete().ExecX(ctx) + client.Group.Delete().ExecX(ctx) + client.Comment.Delete().ExecX(ctx) + client.GroupInfo.Delete().ExecX(ctx) +} diff --git a/entc/integration/plugin/README.md b/entc/integration/plugin/README.md new file mode 100644 index 000000000..a1ac311dd --- /dev/null +++ b/entc/integration/plugin/README.md @@ -0,0 +1,8 @@ +### Example plugins for testing purpose + +#### Generating new assets for plugin tests + +From `plugin` directory, run: +``` +go run ../../cmd/entc/entc.go generate ./ent/schema +``` \ No newline at end of file diff --git a/entc/integration/plugin/ent/boring.go b/entc/integration/plugin/ent/boring.go new file mode 100644 index 000000000..043ff439e --- /dev/null +++ 
b/entc/integration/plugin/ent/boring.go @@ -0,0 +1,125 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "bytes" + "fmt" + "strconv" + + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" +) + +// Boring is the model entity for the Boring schema. +type Boring struct { + config + // ID of the ent. + ID string `json:"id,omitempty"` +} + +// FromResponse scans the gremlin response data into Boring. +func (b *Boring) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vb struct { + ID string `json:"id,omitempty"` + } + if err := vmap.Decode(&vb); err != nil { + return err + } + b.ID = vb.ID + return nil +} + +// FromRows scans the sql response data into Boring. +func (b *Boring) FromRows(rows *sql.Rows) error { + var vb struct { + ID int + } + // the order here should be the same as in the `boring.Columns`. + if err := rows.Scan( + &vb.ID, + ); err != nil { + return err + } + b.ID = strconv.Itoa(vb.ID) + return nil +} + +// Update returns a builder for updating this Boring. +// Note that, you need to call Boring.Unwrap() before calling this method, if this Boring +// was returned from a transaction, and the transaction was committed or rolled back. +func (b *Boring) Update() *BoringUpdateOne { + return (&BoringClient{b.config}).UpdateOne(b) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (b *Boring) Unwrap() *Boring { + tx, ok := b.config.driver.(*txDriver) + if !ok { + panic("ent: Boring is not a transactional entity") + } + b.config.driver = tx.drv + return b +} + +// String implements the fmt.Stringer. 
+func (b *Boring) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Boring(") + buf.WriteString(fmt.Sprintf("id=%v,", b.ID)) + buf.WriteString(")") + return buf.String() +} + +// id returns the int representation of the ID field. +func (b *Boring) id() int { + id, _ := strconv.Atoi(b.ID) + return id +} + +// Borings is a parsable slice of Boring. +type Borings []*Boring + +// FromResponse scans the gremlin response data into Borings. +func (b *Borings) FromResponse(res *gremlin.Response) error { + vmap, err := res.ReadValueMap() + if err != nil { + return err + } + var vb []struct { + ID string `json:"id,omitempty"` + } + if err := vmap.Decode(&vb); err != nil { + return err + } + for _, v := range vb { + *b = append(*b, &Boring{ + ID: v.ID, + }) + } + return nil +} + +// FromRows scans the sql response data into Borings. +func (b *Borings) FromRows(rows *sql.Rows) error { + for rows.Next() { + vb := &Boring{} + if err := vb.FromRows(rows); err != nil { + return err + } + *b = append(*b, vb) + } + return nil +} + +func (b Borings) config(cfg config) { + for i := range b { + b[i].config = cfg + } +} diff --git a/entc/integration/plugin/ent/boring/boring.go b/entc/integration/plugin/ent/boring/boring.go new file mode 100644 index 000000000..bde408b41 --- /dev/null +++ b/entc/integration/plugin/ent/boring/boring.go @@ -0,0 +1,17 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package boring + +const ( + // Label holds the string label denoting the boring type in the database. + Label = "boring" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // Table holds the table name of the boring in the database. + Table = "borings" +) + +// Columns holds all SQL columns are boring fields. 
+var Columns = []string{ + FieldID, +} diff --git a/entc/integration/plugin/ent/boring/where.go b/entc/integration/plugin/ent/boring/where.go new file mode 100644 index 000000000..138f53102 --- /dev/null +++ b/entc/integration/plugin/ent/boring/where.go @@ -0,0 +1,156 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package boring + +import ( + "strconv" + + "fbc/ent" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/p" +) + +// ID filters vertices based on their identifier. +func ID(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + id, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), id)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(id) + }, + } +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.EQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.EQ(id)) + }, + } +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.NEQ(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.NEQ(id)) + }, + } +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GT(id)) + }, + } +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.GTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.GTE(id)) + }, + } +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LT(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LT(id)) + }, + } +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + v, _ := strconv.Atoi(id) + s.Where(sql.LTE(s.C(FieldID), v)) + }, + Gremlin: func(t *dsl.Traversal) { + t.HasID(p.LTE(id)) + }, + } +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.In(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Within(v...)) + }, + } +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i], _ = strconv.Atoi(ids[i]) + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }, + Gremlin: func(t *dsl.Traversal) { + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + t.HasID(p.Without(v...)) + }, + } +} diff --git a/entc/integration/plugin/ent/boring_create.go b/entc/integration/plugin/ent/boring_create.go new file mode 100644 index 000000000..fa5426d45 --- /dev/null +++ b/entc/integration/plugin/ent/boring_create.go @@ -0,0 +1,90 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "strconv" + + "fbc/ent/entc/integration/plugin/ent/boring" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// BoringCreate is the builder for creating a Boring entity. +type BoringCreate struct { + config +} + +// Save creates the Boring in the database. +func (bc *BoringCreate) Save(ctx context.Context) (*Boring, error) { + switch bc.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bc.sqlSave(ctx) + case dialect.Neptune: + return bc.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX calls Save and panics if Save returns an error. 
+func (bc *BoringCreate) SaveX(ctx context.Context) *Boring { + v, err := bc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +func (bc *BoringCreate) sqlSave(ctx context.Context) (*Boring, error) { + var ( + res sql.Result + b = &Boring{config: bc.config} + ) + tx, err := bc.driver.Tx(ctx) + if err != nil { + return nil, err + } + builder := sql.Insert(boring.Table).Default(bc.driver.Dialect()) + query, args := builder.Query() + if err := tx.Exec(ctx, query, args, &res); err != nil { + return nil, rollback(tx, err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, rollback(tx, err) + } + b.ID = strconv.FormatInt(id, 10) + if err := tx.Commit(); err != nil { + return nil, err + } + return b, nil +} + +func (bc *BoringCreate) gremlinSave(ctx context.Context) (*Boring, error) { + res := &gremlin.Response{} + query, bindings := bc.gremlin().Query() + if err := bc.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + b := &Boring{config: bc.config} + if err := b.FromResponse(res); err != nil { + return nil, err + } + return b, nil +} + +func (bc *BoringCreate) gremlin() *dsl.Traversal { + v := g.AddV(boring.Label) + return v.ValueMap(true) +} diff --git a/entc/integration/plugin/ent/boring_delete.go b/entc/integration/plugin/ent/boring_delete.go new file mode 100644 index 000000000..04e19e43c --- /dev/null +++ b/entc/integration/plugin/ent/boring_delete.go @@ -0,0 +1,88 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + + "fbc/ent/entc/integration/plugin/ent/boring" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// BoringDelete is the builder for deleting a Boring entity. +type BoringDelete struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. 
+func (bd *BoringDelete) Where(ps ...ent.Predicate) *BoringDelete { + bd.predicates = append(bd.predicates, ps...) + return bd +} + +// Exec executes the deletion query. +func (bd *BoringDelete) Exec(ctx context.Context) error { + switch bd.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bd.sqlExec(ctx) + case dialect.Neptune: + return bd.gremlinExec(ctx) + default: + return errors.New("ent: unsupported dialect") + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (bd *BoringDelete) ExecX(ctx context.Context) { + if err := bd.Exec(ctx); err != nil { + panic(err) + } +} + +func (bd *BoringDelete) sqlExec(ctx context.Context) error { + var res sql.Result + selector := sql.Select().From(sql.Table(boring.Table)) + for _, p := range bd.predicates { + p.SQL(selector) + } + query, args := sql.Delete(boring.Table).FromSelect(selector).Query() + return bd.driver.Exec(ctx, query, args, &res) +} + +func (bd *BoringDelete) gremlinExec(ctx context.Context) error { + res := &gremlin.Response{} + query, bindings := bd.gremlin().Query() + return bd.driver.Exec(ctx, query, bindings, res) +} + +func (bd *BoringDelete) gremlin() *dsl.Traversal { + t := g.V().HasLabel(boring.Label) + for _, p := range bd.predicates { + p.Gremlin(t) + } + return t.Drop() +} + +// BoringDeleteOne is the builder for deleting a single Boring entity. +type BoringDeleteOne struct { + bd *BoringDelete +} + +// Exec executes the deletion query. +func (bdo *BoringDeleteOne) Exec(ctx context.Context) error { + return bdo.bd.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (bdo *BoringDeleteOne) ExecX(ctx context.Context) { + bdo.bd.ExecX(ctx) +} diff --git a/entc/integration/plugin/ent/boring_query.go b/entc/integration/plugin/ent/boring_query.go new file mode 100644 index 000000000..8248ddeb1 --- /dev/null +++ b/entc/integration/plugin/ent/boring_query.go @@ -0,0 +1,585 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/plugin/ent/boring" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// BoringQuery is the builder for querying Boring entities. +type BoringQuery struct { + config + limit *int + order []Order + unique []string + predicates []ent.Predicate + // intermediate queries. + sql *sql.Selector + gremlin *dsl.Traversal +} + +// Where adds a new predicate for the builder. +func (bq *BoringQuery) Where(ps ...ent.Predicate) *BoringQuery { + bq.predicates = append(bq.predicates, ps...) + return bq +} + +// Limit adds a limit step to the query. +func (bq *BoringQuery) Limit(limit int) *BoringQuery { + bq.limit = &limit + return bq +} + +// Order adds an order step to the query. +func (bq *BoringQuery) Order(o ...Order) *BoringQuery { + bq.order = append(bq.order, o...) + return bq +} + +// Get returns a Boring entity by its id. +func (bq *BoringQuery) Get(ctx context.Context, id string) (*Boring, error) { + return bq.Where(boring.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (bq *BoringQuery) GetX(ctx context.Context, id string) *Boring { + b, err := bq.Get(ctx, id) + if err != nil { + panic(err) + } + return b +} + +// First returns the first Boring entity in the query. Returns *ErrNotFound when no boring was found. +func (bq *BoringQuery) First(ctx context.Context) (*Boring, error) { + bs, err := bq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(bs) == 0 { + return nil, &ErrNotFound{boring.Label} + } + return bs[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (bq *BoringQuery) FirstX(ctx context.Context) *Boring { + b, err := bq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return b +} + +// FirstID returns the first Boring id in the query. 
Returns *ErrNotFound when no id was found. +func (bq *BoringQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = bq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &ErrNotFound{boring.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (bq *BoringQuery) FirstXID(ctx context.Context) string { + id, err := bq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Boring entity in the query, returns an error if not exactly one entity was returned. +func (bq *BoringQuery) Only(ctx context.Context) (*Boring, error) { + bs, err := bq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(bs) { + case 1: + return bs[0], nil + case 0: + return nil, &ErrNotFound{boring.Label} + default: + return nil, &ErrNotSingular{boring.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (bq *BoringQuery) OnlyX(ctx context.Context) *Boring { + b, err := bq.Only(ctx) + if err != nil { + panic(err) + } + return b +} + +// OnlyID returns the only Boring id in the query, returns an error if not exactly one id was returned. +func (bq *BoringQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = bq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &ErrNotFound{boring.Label} + default: + err = &ErrNotSingular{boring.Label} + } + return +} + +// OnlyXID is like OnlyID, but panics if an error occurs. +func (bq *BoringQuery) OnlyXID(ctx context.Context) string { + id, err := bq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Borings. 
+func (bq *BoringQuery) All(ctx context.Context) ([]*Boring, error) { + switch bq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bq.sqlAll(ctx) + case dialect.Neptune: + return bq.gremlinAll(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// AllX is like All, but panics if an error occurs. +func (bq *BoringQuery) AllX(ctx context.Context) []*Boring { + bs, err := bq.All(ctx) + if err != nil { + panic(err) + } + return bs +} + +// IDs executes the query and returns a list of Boring ids. +func (bq *BoringQuery) IDs(ctx context.Context) ([]string, error) { + switch bq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bq.sqlIDs(ctx) + case dialect.Neptune: + return bq.gremlinIDs(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// IDsX is like IDs, but panics if an error occurs. +func (bq *BoringQuery) IDsX(ctx context.Context) []string { + ids, err := bq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (bq *BoringQuery) Count(ctx context.Context) (int, error) { + switch bq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bq.sqlCount(ctx) + case dialect.Neptune: + return bq.gremlinCount(ctx) + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// CountX is like Count, but panics if an error occurs. +func (bq *BoringQuery) CountX(ctx context.Context) int { + count, err := bq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (bq *BoringQuery) Exist(ctx context.Context) (bool, error) { + switch bq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bq.sqlExist(ctx) + case dialect.Neptune: + return bq.gremlinExist(ctx) + default: + return false, errors.New("ent: unsupported dialect") + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (bq *BoringQuery) ExistX(ctx context.Context) bool { + exist, err := bq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +func (bq *BoringQuery) GroupBy(field string, fields ...string) *BoringGroupBy { + group := &BoringGroupBy{config: bq.config} + group.fields = append([]string{field}, fields...) + switch bq.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + group.sql = bq.sqlQuery() + case dialect.Neptune: + group.gremlin = bq.gremlinQuery() + } + return group +} + +func (bq *BoringQuery) sqlAll(ctx context.Context) ([]*Boring, error) { + rows := &sql.Rows{} + selector := bq.sqlQuery() + if unique := bq.unique; len(unique) == 0 { + selector.Distinct() + } + query, args := selector.Query() + if err := bq.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var bs Borings + if err := bs.FromRows(rows); err != nil { + return nil, err + } + bs.config(bq.config) + return bs, nil +} + +func (bq *BoringQuery) sqlCount(ctx context.Context) (int, error) { + rows := &sql.Rows{} + selector := bq.sqlQuery() + unique := []string{boring.FieldID} + if len(bq.unique) > 0 { + unique = bq.unique + } + selector.Count(sql.Distinct(selector.Columns(unique...)...)) + query, args := selector.Query() + if err := bq.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("ent: no rows found") + } + var n int + if err := rows.Scan(&n); err != nil { + return 0, fmt.Errorf("ent: failed reading count: %v", err) + } + return n, nil +} + +func (bq *BoringQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := bq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (bq *BoringQuery) sqlIDs(ctx context.Context) 
([]string, error) { + vs, err := bq.sqlAll(ctx) + if err != nil { + return nil, err + } + var ids []string + for _, v := range vs { + ids = append(ids, v.ID) + } + return ids, nil +} + +func (bq *BoringQuery) sqlQuery() *sql.Selector { + t1 := sql.Table(boring.Table) + selector := sql.Select(t1.Columns(boring.Columns...)...).From(t1) + if bq.sql != nil { + selector = bq.sql + selector.Select(selector.Columns(boring.Columns...)...) + } + for _, p := range bq.predicates { + p.SQL(selector) + } + for _, p := range bq.order { + p.SQL(selector) + } + if limit := bq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +func (bq *BoringQuery) gremlinIDs(ctx context.Context) ([]string, error) { + res := &gremlin.Response{} + query, bindings := bq.gremlinQuery().Query() + if err := bq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + vertices, err := res.ReadVertices() + if err != nil { + return nil, err + } + ids := make([]string, 0, len(vertices)) + for _, vertex := range vertices { + ids = append(ids, vertex.ID.(string)) + } + return ids, nil +} + +func (bq *BoringQuery) gremlinAll(ctx context.Context) ([]*Boring, error) { + res := &gremlin.Response{} + query, bindings := bq.gremlinQuery().ValueMap(true).Query() + if err := bq.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + var bs Borings + if err := bs.FromResponse(res); err != nil { + return nil, err + } + bs.config(bq.config) + return bs, nil +} + +func (bq *BoringQuery) gremlinCount(ctx context.Context) (int, error) { + res := &gremlin.Response{} + query, bindings := bq.gremlinQuery().Count().Query() + if err := bq.driver.Exec(ctx, query, bindings, res); err != nil { + return 0, err + } + return res.ReadInt() +} + +func (bq *BoringQuery) gremlinExist(ctx context.Context) (bool, error) { + res := &gremlin.Response{} + query, bindings := bq.gremlinQuery().HasNext().Query() + if err := bq.driver.Exec(ctx, query, bindings, res); err != nil { + 
return false, err
+	}
+	return res.ReadBool()
+}
+
+func (bq *BoringQuery) gremlinQuery() *dsl.Traversal {
+	v := g.V().HasLabel(boring.Label)
+	if bq.gremlin != nil {
+		v = bq.gremlin.Clone()
+	}
+	for _, p := range bq.predicates {
+		p.Gremlin(v)
+	}
+	if len(bq.order) > 0 {
+		v.Order()
+		for _, p := range bq.order {
+			p.Gremlin(v)
+		}
+	}
+	if limit := bq.limit; limit != nil {
+		v.Limit(*limit)
+	}
+	if unique := bq.unique; len(unique) == 0 {
+		v.Dedup()
+	}
+	return v
+}
+
+// BoringGroupBy is the builder for group-by Boring entities.
+type BoringGroupBy struct {
+	config
+	fields []string
+	fns    []Aggregate
+	// intermediate queries.
+	sql     *sql.Selector
+	gremlin *dsl.Traversal
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (bgb *BoringGroupBy) Aggregate(fns ...Aggregate) *BoringGroupBy {
+	bgb.fns = append(bgb.fns, fns...)
+	return bgb
+}
+
+// Scan applies the group-by query and scan the result into the given value.
+func (bgb *BoringGroupBy) Scan(ctx context.Context, v interface{}) error {
+	switch bgb.driver.Dialect() {
+	case dialect.MySQL, dialect.SQLite:
+		return bgb.sqlScan(ctx, v)
+	case dialect.Neptune:
+		return bgb.gremlinScan(ctx, v)
+	default:
+		return errors.New("bgb: unsupported dialect")
+	}
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (bgb *BoringGroupBy) ScanX(ctx context.Context, v interface{}) {
+	if err := bgb.Scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field.
+func (bgb *BoringGroupBy) Strings(ctx context.Context) ([]string, error) {
+	if len(bgb.fields) > 1 {
+		return nil, errors.New("ent: BoringGroupBy.Strings is not achievable when grouping more than 1 field")
+	}
+	var v []string
+	if err := bgb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (bgb *BoringGroupBy) StringsX(ctx context.Context) []string { + v, err := bgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (bgb *BoringGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BoringGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (bgb *BoringGroupBy) IntsX(ctx context.Context) []int { + v, err := bgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (bgb *BoringGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BoringGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (bgb *BoringGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := bgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (bgb *BoringGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BoringGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (bgb *BoringGroupBy) BoolsX(ctx context.Context) []bool { + v, err := bgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +func (bgb *BoringGroupBy) sqlScan(ctx context.Context, v interface{}) error { + rows := &sql.Rows{} + query, args := bgb.sqlQuery().Query() + if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (bgb *BoringGroupBy) sqlQuery() *sql.Selector { + selector := bgb.sql + columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) + columns = append(columns, bgb.fields...) + for _, fn := range bgb.fns { + columns = append(columns, fn.SQL(selector)) + } + return selector.Select(columns...).GroupBy(bgb.fields...) +} + +func (bgb *BoringGroupBy) gremlinScan(ctx context.Context, v interface{}) error { + res := &gremlin.Response{} + query, bindings := bgb.gremlinQuery().Query() + if err := bgb.driver.Exec(ctx, query, bindings, res); err != nil { + return err + } + if len(bgb.fields)+len(bgb.fns) == 1 { + return res.ReadVal(v) + } + vm, err := res.ReadValueMap() + if err != nil { + return err + } + return vm.Decode(v) +} + +func (bgb *BoringGroupBy) gremlinQuery() *dsl.Traversal { + var ( + trs []interface{} + names []interface{} + ) + for _, fn := range bgb.fns { + name, tr := fn.Gremlin("p", "") + trs = append(trs, tr) + names = append(names, name) + } + for _, f := range bgb.fields { + names = append(names, f) + trs = append(trs, __.As("p").Unfold().Values(f).As(f)) + } + return bgb.gremlin.Group(). + By(__.Values(bgb.fields...).Fold()). + By(__.Fold().Match(trs...).Select(names...)). + Select(dsl.Values). + Next() +} diff --git a/entc/integration/plugin/ent/boring_update.go b/entc/integration/plugin/ent/boring_update.go new file mode 100644 index 000000000..8c99210e1 --- /dev/null +++ b/entc/integration/plugin/ent/boring_update.go @@ -0,0 +1,231 @@ +// Code generated (@generated) by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "fbc/ent/entc/integration/plugin/ent/boring" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/g" +) + +// BoringUpdate is the builder for updating Boring entities. +type BoringUpdate struct { + config + predicates []ent.Predicate +} + +// Where adds a new predicate for the builder. +func (bu *BoringUpdate) Where(ps ...ent.Predicate) *BoringUpdate { + bu.predicates = append(bu.predicates, ps...) + return bu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (bu *BoringUpdate) Save(ctx context.Context) (int, error) { + switch bu.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return bu.sqlSave(ctx) + case dialect.Neptune: + vertices, err := bu.gremlinSave(ctx) + return len(vertices), err + default: + return 0, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (bu *BoringUpdate) SaveX(ctx context.Context) int { + affected, err := bu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (bu *BoringUpdate) Exec(ctx context.Context) error { + _, err := bu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (bu *BoringUpdate) ExecX(ctx context.Context) { + if err := bu.Exec(ctx); err != nil { + panic(err) + } +} + +func (bu *BoringUpdate) sqlSave(ctx context.Context) (n int, err error) { + selector := sql.Select(boring.FieldID).From(sql.Table(boring.Table)) + for _, p := range bu.predicates { + p.SQL(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err = bu.driver.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + if err := rows.Scan(&id); err != nil { + return 0, fmt.Errorf("ent: failed reading id: %v", err) + } + ids = append(ids, id) + } + if len(ids) == 0 { + return 0, nil + } + + tx, err := bu.driver.Tx(ctx) + if err != nil { + return 0, err + } + if err = tx.Commit(); err != nil { + return 0, err + } + return len(ids), nil +} + +func (bu *BoringUpdate) gremlinSave(ctx context.Context) ([]*Boring, error) { + res := &gremlin.Response{} + query, bindings := bu.gremlin().Query() + if err := bu.driver.Exec(ctx, query, bindings, res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + var bs Borings + bs.config(bu.config) + if err := bs.FromResponse(res); err != nil { + return nil, err + } + return bs, nil +} + +func (bu *BoringUpdate) gremlin() *dsl.Traversal { + v := g.V().HasLabel(boring.Label) + for _, p := range bu.predicates { + p.Gremlin(v) + } + var ( + trs []*dsl.Traversal + ) + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} + +// BoringUpdateOne is the builder for updating a single Boring entity. +type BoringUpdateOne struct { + config + id string +} + +// Save executes the query and returns the updated entity. 
+func (buo *BoringUpdateOne) Save(ctx context.Context) (*Boring, error) { + switch buo.driver.Dialect() { + case dialect.MySQL, dialect.SQLite: + return buo.sqlSave(ctx) + case dialect.Neptune: + return buo.gremlinSave(ctx) + default: + return nil, errors.New("ent: unsupported dialect") + } +} + +// SaveX is like Save, but panics if an error occurs. +func (buo *BoringUpdateOne) SaveX(ctx context.Context) *Boring { + b, err := buo.Save(ctx) + if err != nil { + panic(err) + } + return b +} + +// Exec executes the query on the entity. +func (buo *BoringUpdateOne) Exec(ctx context.Context) error { + _, err := buo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (buo *BoringUpdateOne) ExecX(ctx context.Context) { + if err := buo.Exec(ctx); err != nil { + panic(err) + } +} + +func (buo *BoringUpdateOne) sqlSave(ctx context.Context) (b *Boring, err error) { + selector := sql.Select(boring.Columns...).From(sql.Table(boring.Table)) + boring.ID(buo.id).SQL(selector) + rows := &sql.Rows{} + query, args := selector.Query() + if err = buo.driver.Query(ctx, query, args, rows); err != nil { + return nil, err + } + defer rows.Close() + var ids []int + for rows.Next() { + var id int + b = &Boring{config: buo.config} + if err := b.FromRows(rows); err != nil { + return nil, fmt.Errorf("ent: failed scanning row into Boring: %v", err) + } + id = b.id() + ids = append(ids, id) + } + switch n := len(ids); { + case n == 0: + return nil, fmt.Errorf("ent: Boring not found with id: %v", buo.id) + case n > 1: + return nil, fmt.Errorf("ent: more than one Boring with the same id: %v", buo.id) + } + + tx, err := buo.driver.Tx(ctx) + if err != nil { + return nil, err + } + if err = tx.Commit(); err != nil { + return nil, err + } + return b, nil +} + +func (buo *BoringUpdateOne) gremlinSave(ctx context.Context) (*Boring, error) { + res := &gremlin.Response{} + query, bindings := buo.gremlin(buo.id).Query() + if err := buo.driver.Exec(ctx, query, bindings, 
res); err != nil { + return nil, err + } + if err, ok := isConstantError(res); ok { + return nil, err + } + b := &Boring{config: buo.config} + if err := b.FromResponse(res); err != nil { + return nil, err + } + return b, nil +} + +func (buo *BoringUpdateOne) gremlin(id string) *dsl.Traversal { + v := g.V(id) + var ( + trs []*dsl.Traversal + ) + v.ValueMap(true) + trs = append(trs, v) + return dsl.Join(trs...) +} diff --git a/entc/integration/plugin/ent/client.go b/entc/integration/plugin/ent/client.go new file mode 100644 index 000000000..e37385dea --- /dev/null +++ b/entc/integration/plugin/ent/client.go @@ -0,0 +1,99 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "log" + + "fbc/ent/entc/integration/plugin/ent/migrate" + + "fbc/ent/entc/integration/plugin/ent/boring" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Boring is the client for interacting with the Boring builders. + Boring *BoringClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + c := config{log: log.Println} + c.options(opts...) + return &Client{ + config: c, + Schema: migrate.NewSchema(c.driver), + Boring: NewBoringClient(c), + } +} + +// Tx returns a new transactional client. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %v", err) + } + cfg := config{driver: tx, log: c.log, verbose: c.verbose} + return &Tx{ + config: cfg, + Boring: NewBoringClient(cfg), + }, nil +} + +// BoringClient is a client for the Boring schema. 
+type BoringClient struct {
+	config
+}
+
+// NewBoringClient returns a client for the Boring from the given config.
+func NewBoringClient(c config) *BoringClient {
+	return &BoringClient{config: c}
+}
+
+// Create returns a create builder for Boring.
+func (c *BoringClient) Create() *BoringCreate {
+	return &BoringCreate{config: c.config}
+}
+
+// Update returns an update builder for Boring.
+func (c *BoringClient) Update() *BoringUpdate {
+	return &BoringUpdate{config: c.config}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *BoringClient) UpdateOne(b *Boring) *BoringUpdateOne {
+	return c.UpdateOneID(b.ID)
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *BoringClient) UpdateOneID(id string) *BoringUpdateOne {
+	return &BoringUpdateOne{config: c.config, id: id}
+}
+
+// Delete returns a delete builder for Boring.
+func (c *BoringClient) Delete() *BoringDelete {
+	return &BoringDelete{config: c.config}
+}
+
+// DeleteOne returns a delete builder for the given entity.
+func (c *BoringClient) DeleteOne(b *Boring) *BoringDeleteOne {
+	return c.DeleteOneID(b.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *BoringClient) DeleteOneID(id string) *BoringDeleteOne {
+	return &BoringDeleteOne{c.Delete().Where(boring.ID(id))}
+}
+
+// Query returns a query builder for Boring.
+func (c *BoringClient) Query() *BoringQuery {
+	return &BoringQuery{config: c.config}
+}
diff --git a/entc/integration/plugin/ent/config.go b/entc/integration/plugin/ent/config.go
new file mode 100644
index 000000000..53f33835c
--- /dev/null
+++ b/entc/integration/plugin/ent/config.go
@@ -0,0 +1,51 @@
+// Code generated (@generated) by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"fbc/ent/dialect"
+)
+
+// Option function to configure the client.
+type Option func(*config)
+
+// Config is the configuration for the client and its builder.
+type config struct { + // driver is the driver used for execute database requests. + driver dialect.Driver + // verbose enable a verbosity logging. + verbose bool + // log used for logging on verbose mode. + log func(...interface{}) +} + +// Options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.verbose { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Verbose sets the client logging to verbose. +func Verbose() Option { + return func(c *config) { + c.verbose = true + } +} + +// Log sets the client logging to verbose. +func Log(fn func(...interface{})) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} diff --git a/entc/integration/plugin/ent/ent.go b/entc/integration/plugin/ent/ent.go new file mode 100644 index 000000000..87b29d7c1 --- /dev/null +++ b/entc/integration/plugin/ent/ent.go @@ -0,0 +1,349 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strconv" + "strings" + + "fbc/ent" + "fbc/ent/dialect" + "fbc/ent/dialect/sql" + + "fbc/lib/go/gremlin" + "fbc/lib/go/gremlin/encoding/graphson" + "fbc/lib/go/gremlin/graph/dsl" + "fbc/lib/go/gremlin/graph/dsl/__" +) + +// Predicate is an alias to ent.Predicate. +type Predicate = ent.Predicate + +// Or groups list of predicates with the or operator between them. +func Or(predicates ...ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + for i, p := range predicates { + if i > 0 { + s.Or() + } + p.SQL(s) + } + }, + Gremlin: func(tr *dsl.Traversal) { + trs := make([]interface{}, 0, len(predicates)) + for _, p := range predicates { + t := __.New() + p.Gremlin(t) + trs = append(trs, t) + } + tr.Where(__.Or(trs...)) + }, + } +} + +// Not applies the not operator on the given predicate. 
+func Not(p ent.Predicate) ent.Predicate { + return ent.Predicate{ + SQL: func(s *sql.Selector) { + p.SQL(s.Not()) + }, + Gremlin: func(tr *dsl.Traversal) { + t := __.New() + p.Gremlin(t) + tr.Where(__.Not(t)) + }, + } +} + +// Order applies an ordering on the traversal. +type Order ent.Predicate + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) Order { + return Order{ + SQL: func(s *sql.Selector) { + for _, f := range fields { + s.OrderBy(sql.Asc(f)) + } + }, + Gremlin: func(tr *dsl.Traversal) { + for _, f := range fields { + tr.By(f, dsl.Incr) + } + }, + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) Order { + return Order{ + SQL: func(s *sql.Selector) { + for _, f := range fields { + s.OrderBy(sql.Desc(f)) + } + }, + Gremlin: func(tr *dsl.Traversal) { + for _, f := range fields { + tr.By(f, dsl.Decr) + } + }, + } +} + +// Aggregate applies an aggregation step on the group-by traversal/selector. +type Aggregate struct { + // SQL the column wrapped with the aggregation function. + SQL func(*sql.Selector) string + // Gremlin gets two labels as parameters. The first used in the `As` step for the predicate, + // and the second is an optional name for the next predicates (or for later usage). + Gremlin func(string, string) (string, *dsl.Traversal) +} + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +// +func As(fn Aggregate, end string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.As(fn.SQL(s), end) + }, + Gremlin: func(start, _ string) (string, *dsl.Traversal) { + return fn.Gremlin(start, end) + }, + } +} + +// DefaultCountLabel is the default label name for the Count aggregation function. 
+// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "count" 2 or more fields and avoid conflicting, use the `ent.As(ent.Count(field), "custom_name")` +// function with custom name in order to override it. +const DefaultCountLabel = "count" + +// Count applies the "count" aggregation function on each group. +func Count() Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Count("*") + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultCountLabel + } + return end, __.As(start).Count(dsl.Local).As(end) + }, + } +} + +// DefaultMaxLabel is the default label name for the Max aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "max" 2 or more fields and avoid conflicting, use the `ent.As(ent.Max(field), "custom_name")` +// function with custom name in order to override it. +const DefaultMaxLabel = "max" + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Max(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMaxLabel + } + return end, __.As(start).Unfold().Values(field).Max().As(end) + }, + } +} + +// DefaultMeanLabel is the default label name for the Mean aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "mean" 2 or more fields and avoid conflicting, use the `ent.As(ent.Mean(field), "custom_name")` +// function with custom name in order to override it. +const DefaultMeanLabel = "mean" + +// Mean applies the "mean" aggregation function on the given field of each group. 
+func Mean(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Avg(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMeanLabel + } + return end, __.As(start).Unfold().Values(field).Mean().As(end) + }, + } +} + +// DefaultMinLabel is the default label name for the Min aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "min" 2 or more fields and avoid conflicting, use the `ent.As(ent.Min(field), "custom_name")` +// function with custom name in order to override it. +const DefaultMinLabel = "min" + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Min(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultMinLabel + } + return end, __.As(start).Unfold().Values(field).Min().As(end) + }, + } +} + +// DefaultSumLabel is the default label name for the Sum aggregation function. +// It should be used as the struct-tag for decoding, or a map key for interaction with the returned response. +// In order to "sum" 2 or more fields and avoid conflicting, use the `ent.As(ent.Sum(field), "custom_name")` +// function with custom name in order to override it. +const DefaultSumLabel = "sum" + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) Aggregate { + return Aggregate{ + SQL: func(s *sql.Selector) string { + return sql.Sum(s.C(field)) + }, + Gremlin: func(start, end string) (string, *dsl.Traversal) { + if end == "" { + end = DefaultSumLabel + } + return end, __.As(start).Unfold().Values(field).Sum().As(end) + }, + } +} + +// ErrNotFound returns when trying to fetch a specific entity and it was not found in the database. 
+type ErrNotFound struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotFound) Error() string { + return fmt.Sprintf("ent: %s not found", e.label) +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + _, ok := err.(*ErrNotFound) + return ok +} + +// ErrNotSingular returns when trying to fetch a singular entity and more then one was found in the database. +type ErrNotSingular struct { + label string +} + +// Error implements the error interface. +func (e *ErrNotSingular) Error() string { + return fmt.Sprintf("ent: %s not singular", e.label) +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + _, ok := err.(*ErrNotSingular) + return ok +} + +// ErrConstraintFailed returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or field uniqueness. +type ErrConstraintFailed struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ErrConstraintFailed) Error() string { + return fmt.Sprintf("ent: unique constraint failed: %s", e.msg) +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ErrConstraintFailed) Unwrap() error { + return e.wrap +} + +// Code implements the dsl.Node interface. +func (e ErrConstraintFailed) Code() (string, []interface{}) { + return strconv.Quote(e.prefix() + e.msg), nil +} + +func (e *ErrConstraintFailed) UnmarshalGraphson(b []byte) error { + var v [1]*string + if err := graphson.Unmarshal(b, &v); err != nil { + return err + } + if v[0] == nil { + return fmt.Errorf("ent: missing string value") + } + if !strings.HasPrefix(*v[0], e.prefix()) { + return fmt.Errorf("ent: invalid string for error: %s", *v[0]) + } + e.msg = strings.TrimPrefix(*v[0], e.prefix()) + return nil +} + +// prefix returns the prefix used for gremlin constants. 
+func (ErrConstraintFailed) prefix() string { return "Error: " } + +// NewErrUniqueField creates a constraint error for unique fields. +func NewErrUniqueField(label, field string, v interface{}) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("field %s.%s with value: %#v", label, field, v)} +} + +// NewErrUniqueEdge creates a constraint error for unique edges. +func NewErrUniqueEdge(label, edge, id string) *ErrConstraintFailed { + return &ErrConstraintFailed{msg: fmt.Sprintf("edge %s.%s with id: %#v", label, edge, id)} +} + +// IsConstraintFailure returns a boolean indicating whether the error is a constraint failure. +func IsConstraintFailure(err error) bool { + _, ok := err.(*ErrConstraintFailed) + return ok +} + +// isConstantError indicates if the given response holds a gremlin constant containing an error. +func isConstantError(r *gremlin.Response) (*ErrConstraintFailed, bool) { + e := &ErrConstraintFailed{} + if err := graphson.Unmarshal(r.Result.Data, e); err != nil { + return nil, false + } + return e, true +} + +func isSQLConstraintError(err error) (*ErrConstraintFailed, bool) { + // Error number 1062 is ER_DUP_ENTRY in mysql, and "UNIQUE constraint failed" is SQLite prefix. + if msg := err.Error(); strings.HasPrefix(msg, "Error 1062") || strings.HasPrefix(msg, "UNIQUE constraint failed") { + return &ErrConstraintFailed{msg, err}, true + } + return nil, false +} + +// rollback calls to tx.Rollback and wraps the given error with the rollback error if occurred. +func rollback(tx dialect.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%s: %v", err.Error(), rerr) + } + if err, ok := isSQLConstraintError(err); ok { + return err + } + return err +} + +// keys returns the keys/ids from the edge map. 
+func keys(m map[string]struct{}) []string { + s := make([]string, 0, len(m)) + for id, _ := range m { + s = append(s, id) + } + return s +} diff --git a/entc/integration/plugin/ent/example_test.go b/entc/integration/plugin/ent/example_test.go new file mode 100644 index 000000000..cd30047c2 --- /dev/null +++ b/entc/integration/plugin/ent/example_test.go @@ -0,0 +1,51 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "log" + "net/url" + "os" + + "fbc/ent/dialect" + "fbc/lib/go/gremlin" +) + +// endpoint for the database. In order to run the tests locally, run the following command: +// +// ENT_INTEGRATION_ENDPOINT="http://localhost:8182" go test -v +// +var endpoint *gremlin.Endpoint + +func init() { + if e, ok := os.LookupEnv("ENT_INTEGRATION_ENDPOINT"); ok { + if u, err := url.Parse(e); err == nil { + endpoint = &gremlin.Endpoint{u} + } + } +} + +func ExampleBoring() { + if endpoint == nil { + return + } + ctx := context.Background() + conn, err := gremlin.NewClient(gremlin.Config{Endpoint: *endpoint}) + if err != nil { + log.Fatalf("failed creating database client: %v", err) + } + client := NewClient(Driver(dialect.NewGremlin(conn))) + + // creating vertices for the boring's edges. + + // create boring vertex with its edges. + b := client.Boring. + Create(). + SaveX(ctx) + log.Println("boring created:", b) + + // query edges. + + // Output: +} diff --git a/entc/integration/plugin/ent/migrate/migrate.go b/entc/integration/plugin/ent/migrate/migrate.go new file mode 100644 index 000000000..04f067e78 --- /dev/null +++ b/entc/integration/plugin/ent/migrate/migrate.go @@ -0,0 +1,30 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package migrate + +import ( + "fbc/ent/dialect/sql/schema" + "fbc/ent/field" +) + +var ( + nullable = true + // BoringsColumns holds the columns for the "borings" table. 
+ BoringsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + } + // BoringsTable holds the schema information for the "borings" table. + BoringsTable = &schema.Table{ + Name: "borings", + Columns: BoringsColumns, + PrimaryKey: []*schema.Column{BoringsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{}, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + BoringsTable, + } +) + +func init() { +} diff --git a/entc/integration/plugin/ent/migrate/schema.go b/entc/integration/plugin/ent/migrate/schema.go new file mode 100644 index 000000000..455fce24a --- /dev/null +++ b/entc/integration/plugin/ent/migrate/schema.go @@ -0,0 +1,41 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + + "fbc/ent/dialect" + "fbc/ent/dialect/sql/schema" +) + +// SQLDialect wraps the dialect.Driver with additional migration methods. +type SQLDriver interface { + Create(context.Context, ...*schema.Table) error +} + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv SQLDriver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { + s := &Schema{} + switch drv.Dialect() { + case dialect.MySQL: + s.drv = &schema.MySQL{Driver: drv} + case dialect.SQLite: + s.drv = &schema.SQLite{Driver: drv} + } + return s +} + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context) error { + if s.drv == nil { + return fmt.Errorf("ent/migrate: dialect does not support migration") + } + return s.drv.Create(ctx, Tables...) +} diff --git a/entc/integration/plugin/ent/schema/boring.go b/entc/integration/plugin/ent/schema/boring.go new file mode 100644 index 000000000..63ffbecc0 --- /dev/null +++ b/entc/integration/plugin/ent/schema/boring.go @@ -0,0 +1,18 @@ +package schema + +import "fbc/ent" + +// Boring holds the schema definition for the Boring entity. 
+type Boring struct { + ent.Schema +} + +// Fields of the Boring. +func (Boring) Fields() []ent.Field { + return nil +} + +// Edges of the Boring. +func (Boring) Edges() []ent.Edge { + return nil +} diff --git a/entc/integration/plugin/ent/tx.go b/entc/integration/plugin/ent/tx.go new file mode 100644 index 000000000..6e5b2e8a4 --- /dev/null +++ b/entc/integration/plugin/ent/tx.go @@ -0,0 +1,101 @@ +// Code generated (@generated) by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "fbc/ent/dialect" + "fbc/ent/entc/integration/plugin/ent/migrate" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Boring is the client for interacting with the Boring builders. + Boring *BoringClient +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + return tx.config.driver.(*txDriver).tx.Commit() +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + return tx.config.driver.(*txDriver).tx.Rollback() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + return &Client{ + config: tx.config, + Schema: migrate.NewSchema(tx.driver), + Boring: NewBoringClient(tx.config), + } +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Boring.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that this driver is safe for concurrent usage, however, it executes only one query +// at the time. 
+type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // protects the tx below from concurrent execution. + mu sync.Mutex + // tx is the underlying transaction. + tx dialect.Tx +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
+func (tx *txDriver) Query(ctx context.Context, query string, args interface{}, v interface{}) error { + tx.mu.Lock() + defer tx.mu.Unlock() + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/entc/integration/plugin/plugin_test.go b/entc/integration/plugin/plugin_test.go new file mode 100644 index 000000000..6b0fd6440 --- /dev/null +++ b/entc/integration/plugin/plugin_test.go @@ -0,0 +1,38 @@ +package plugin + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPlugin(t *testing.T) { + plg := "printer.so" + + // build entc plugin. + cmd := exec.Command("go", "build", "-o", plg, "-buildmode", "plugin", "./testdata") + _, err := run(cmd) + require.NoError(t, err) + defer os.Remove(plg) + + // execute entc generate and expect the plugin to be executed. + cmd = exec.Command("go", "run", "../../cmd/entc/entc.go", "generate", "--plugin", plg, "./ent/schema") + out, err := run(cmd) + require.NoError(t, err) + require.Equal(t, "Boring\n", out, "printer plugin should print node names") + +} + +func run(cmd *exec.Cmd) (string, error) { + out := bytes.NewBuffer(nil) + cmd.Stderr = out + cmd.Stdout = out + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("integration/plugin: %s", out) + } + return out.String(), nil +} diff --git a/entc/integration/plugin/testdata/printer.go b/entc/integration/plugin/testdata/printer.go new file mode 100644 index 000000000..07971df91 --- /dev/null +++ b/entc/integration/plugin/testdata/printer.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + + "fbc/ent/entc/gen" + "fbc/ent/entc/plugin" +) + +// Gen is the required plugin symbol. 
+var Gen = plugin.GeneratorFunc(func(graph *gen.Graph) error { + for _, n := range graph.Nodes { + fmt.Println(n.Name) + } + return nil +}) diff --git a/entc/internal/build/bindata.go b/entc/internal/build/bindata.go new file mode 100644 index 000000000..b6a90f874 --- /dev/null +++ b/entc/internal/build/bindata.go @@ -0,0 +1,246 @@ +// Package build Code generated by go-bindata. (@generated) DO NOT EDIT. +// sources: +// template/build.tmpl +package build + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name return file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size return file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode return file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// Mode return file modify time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir return file whether a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys return file is sys mode +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _templateBuildTmpl = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8f\x41\x4b\x03\x31\x10\x85\xcf\x3b\xbf\xe2\x11\x2a\x28\xb4\xe9\xbd\xd0\x9b\x67\x11\x7a\x14\x91\x74\x3b\xbb\x0d\x36\xd9\x75\x93\x8a\x32\xcc\x7f\x97\x64\xd7\x1e\x7a\x0a\x33\x79\xf9\xbe\x3c\x11\x9c\xb8\xf3\x91\x61\x82\xf3\xd1\x40\x95\x46\xd7\x7e\xba\x9e\x51\x16\x44\x3e\x8c\xc3\x94\xf1\x48\x8d\x08\xc6\xc9\xc7\xdc\xc1\x3c\x7c\x19\xd8\xd7\x25\xa7\x4a\xd4\x98\xee\xd8\x6e\x39\x66\x43\x4f\x44\x22\x58\x1d\x5d\x62\xec\xf6\xa8\xe7\x7f\xb6\x44\xb7\x5b\x88\xc0\x1e\x7e\xc3\x71\xb8\x40\x15\x3e\x21\x9f\x19\xe3\xe5\xda\xfb\x88\x6e\x98\xc0\x3f\xc5\xe9\x63\x5f\x2f\xee\xbc\x33\x59\x15\xa9\x3d\x73\x70\x69\x7e\x11\x73\x6b\xe9\xdb\x4d\x77\xec\x3d\xde\xde\x39\x66\x7b\xa8\x59\xa9\x25\x26\x17\x7b\xc6\xea\x63\x8d\x55\x74\xa1\x7e\xd2\xbe\xb8\xc0\xa9\x34\x69\x1a\x91\xcd\xcd\x61\xcb\x70\x93\x27\x99\x31\xbb\xa2\xb3\xcf\xdc\xb9\xeb\x25\xcf\x2b\x35\x0b\x4c\x75\x5d\x25\x1c\x4f\xd8\xa8\x92\xd2\x32\xa8\xfe\x05\x00\x00\xff\xff\x80\xe4\x62\x1c\x6c\x01\x00\x00") + +func templateBuildTmplBytes() ([]byte, error) { + return bindataRead( + _templateBuildTmpl, + "template/build.tmpl", + ) +} + +func templateBuildTmpl() (*asset, error) { + bytes, err := templateBuildTmplBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "template/build.tmpl", size: 364, mode: os.FileMode(420), modTime: time.Unix(1554652414, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. 
+// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "template/build.tmpl": templateBuildTmpl, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "template": &bintree{nil, map[string]*bintree{ + "build.tmpl": &bintree{templateBuildTmpl, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/entc/internal/build/build.go b/entc/internal/build/build.go new file mode 100644 index 000000000..566c8a7bc --- /dev/null +++ b/entc/internal/build/build.go @@ -0,0 +1,148 @@ +// Package build is the interface for loading schema package into a Go plugin. +package build + +import ( + "bytes" + "fbc/ent" + "fmt" + "go/format" + "go/types" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "plugin" + "reflect" + "sort" + "strings" + "text/template" + "time" + + "github.com/pkg/errors" + "golang.org/x/tools/go/packages" +) + +// Symbol is the exported "Symbol" of the plugin. +const Symbol = "Names" + +// Plugin holds the plugin build info. +type Plugin struct { + // Path is the path for the Go plugin. + Path string + // PkgPath is the path where the schema package reside. + // Note that path can be either a package path (e.g. github.com/a8m/x) + // or a filepath (e.g. ./ent/schema). + PkgPath string +} + +// Load loads the schemas from the generated plugin. +func (p *Plugin) Load() ([]ent.Schema, error) { + plg, err := plugin.Open(p.Path) + if err != nil { + return nil, errors.WithMessagef(err, "open plugin %s", p.Path) + } + schemas, err := plg.Lookup(Symbol) + if err != nil { + return nil, errors.WithMessagef(err, "find schemas in plugin") + } + return *schemas.(*[]ent.Schema), nil +} + +// Config holds the configuration for package building. +type Config struct { + // Path is the path for the schema package. + Path string + // Names are the schema names to run the code generation on. + // Empty means all schemas in the directory. + Names []string +} + +// Build loads the schemas package and build the Go plugin with this info. 
+func (c *Config) Build() (*Plugin, error) { + pkgPath, err := c.load() + if err != nil { + return nil, errors.WithMessage(err, "load schemas dir") + } + if len(c.Names) == 0 { + return nil, errors.Errorf("no schema found in: %s", c.Path) + } + b := bytes.NewBuffer(nil) + err = templates.ExecuteTemplate(b, "main", struct { + *Config + Symbol, Package string + }{c, Symbol, pkgPath}) + if err != nil { + return nil, errors.WithMessage(err, "execute template") + } + buf, err := format.Source(b.Bytes()) + if err != nil { + return nil, errors.WithMessage(err, "format template") + } + target := fmt.Sprintf("%s.go", filename(pkgPath)) + if err := ioutil.WriteFile(target, buf, 0644); err != nil { + return nil, errors.WithMessagef(err, "write file %s", target) + } + defer os.Remove(target) + plg := filepath.Join(os.TempDir(), fmt.Sprintf("%s.so", filename(pkgPath))) + cmd := exec.Command("go", "build", "-o", plg, "-buildmode", "plugin", target) + if err := run(cmd); err != nil { + return nil, err + } + return &Plugin{PkgPath: pkgPath, Path: plg}, nil +} + +// load loads the schemas info. +func (c *Config) load() (string, error) { + // get the ent package info statically instead of dealing with string constants + // in the code, since import is handled by goimports and renaming should be easy. 
+ entface := reflect.TypeOf(struct{ ent.Schema }{}).Field(0).Type + pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax}, c.Path, entface.PkgPath()) + if err != nil { + return "", err + } + entPkg, pkg := pkgs[0], pkgs[1] + if pkgs[0].PkgPath != entface.PkgPath() { + entPkg, pkg = pkgs[1], pkgs[0] + } + names := make([]string, 0) + iface := entPkg.Types.Scope().Lookup(entface.Name()).Type().Underlying().(*types.Interface) + for k, v := range pkg.TypesInfo.Defs { + typ, ok := v.(*types.TypeName) + if !ok || !k.IsExported() || !types.Implements(typ.Type(), iface) { + continue + } + names = append(names, k.Name) + } + if len(c.Names) == 0 { + c.Names = names + } + sort.Strings(c.Names) + return pkg.PkgPath, err +} + +//go:generate go-bindata -pkg=build ./template/... + +var templates = tmpl() + +func tmpl() *template.Template { + t := template.New("templates").Funcs(template.FuncMap{"base": filepath.Base}) + for _, asset := range AssetNames() { + t = template.Must(t.Parse(string(MustAsset(asset)))) + } + return t +} + +func filename(pkg string) string { + name := strings.Replace(pkg, "/", "_", -1) + return fmt.Sprintf("entc_%s_%d", name, time.Now().Unix()) +} + +// Run runs an exec command and returns the stderr if it failed. 
+func run(cmd *exec.Cmd) error { + out := bytes.NewBuffer(nil) + cmd.Stderr = out + if err := cmd.Run(); err != nil { + return fmt.Errorf("entc/internal/build: %s", out) + } + return nil +} diff --git a/entc/internal/build/build_test.go b/entc/internal/build/build_test.go new file mode 100644 index 000000000..7e3eff5f6 --- /dev/null +++ b/entc/internal/build/build_test.go @@ -0,0 +1,40 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuild(t *testing.T) { + cfg := &Config{Path: "./testdata/valid"} + plg, err := cfg.Build() + require.NoError(t, err) + schemas, err := plg.Load() + require.NoError(t, err) + require.Len(t, schemas, 3) + require.Equal(t, "fbc/ent/entc/internal/build/testdata/valid", plg.PkgPath) +} + +func TestBuildWrongPath(t *testing.T) { + cfg := &Config{Path: "./boring"} + plg, err := cfg.Build() + require.Error(t, err) + require.Nil(t, plg) +} + +func TestBuildSpecific(t *testing.T) { + cfg := &Config{Path: "./testdata/valid", Names: []string{"User"}} + plg, err := cfg.Build() + require.NoError(t, err) + schemas, err := plg.Load() + require.NoError(t, err) + require.Len(t, schemas, 1) +} + +func TestBuildNoSchema(t *testing.T) { + cfg := &Config{Path: "./testdata/invalid"} + plg, err := cfg.Build() + require.Error(t, err) + require.Nil(t, plg) +} diff --git a/entc/internal/build/template/build.tmpl b/entc/internal/build/template/build.tmpl new file mode 100644 index 000000000..369482f01 --- /dev/null +++ b/entc/internal/build/template/build.tmpl @@ -0,0 +1,18 @@ +{{ define "main" }} +package main + +import ( + {{ printf "%q" .Package }} + + "fbc/ent" +) + +{{ $base := base .Package}} + +// {{ .Symbol }} is the plugin for exporting the {{ printf "%q" $base }} schemas for entc. 
+var {{ .Symbol }} = []ent.Schema{ + {{ range $_, $name := .Names }} + {{- $base }}.{{- printf "%s{Schema: ent.DefaultSchema}" $name }}, + {{ end -}} +} +{{ end }} \ No newline at end of file diff --git a/entc/internal/build/testdata/invalid/schema.go b/entc/internal/build/testdata/invalid/schema.go new file mode 100644 index 000000000..66fff149c --- /dev/null +++ b/entc/internal/build/testdata/invalid/schema.go @@ -0,0 +1,4 @@ +package invalid + +// User is invalid schema. +type User struct{} diff --git a/entc/internal/build/testdata/valid/schema.go b/entc/internal/build/testdata/valid/schema.go new file mode 100644 index 000000000..57c778047 --- /dev/null +++ b/entc/internal/build/testdata/valid/schema.go @@ -0,0 +1,18 @@ +package valid + +import "fbc/ent" + +// User holds the user schema. +type User struct { + ent.Schema +} + +// Group holds the group schema. +type Group struct { + ent.Schema +} + +// Tag holds the tag schema. +type Tag struct { + ent.Schema +} diff --git a/entc/plugin/plugin.go b/entc/plugin/plugin.go new file mode 100644 index 000000000..cd5fb4539 --- /dev/null +++ b/entc/plugin/plugin.go @@ -0,0 +1,109 @@ +// Package plugin provides a way to extend entc via plugins. +// Plugin can be either a Go plugin that is loaded and executed +// by entc runtime or a standalone program. +package plugin + +import ( + "os" + "path/filepath" + "plugin" + + "fbc/ent/entc/gen" + "fbc/ent/entc/internal/build" + + "github.com/pkg/errors" +) + +// Symbol is the expected symbol name in the provided plugin. +// The plugin.Symbol need to be a type Generator. For example: +// +// package main +// +// var Gen = GeneratorFunc(func(graph *gen.Graph) error { +// return nil +// }) +// +const Symbol = "Gen" + +// Generator is the interface that wrap the Gen method executed by entc. +type Generator interface { + Gen(*gen.Graph) error +} + +// The GeneratorFunc type is an adapter to allow the use of ordinary functions as Generator. 
+// If f is a function with the appropriate signature, GeneratorFunc(f) is a Generator that calls f. +type GeneratorFunc func(*gen.Graph) error + +// Gen calls f(g). +func (f GeneratorFunc) Gen(g *gen.Graph) error { return f(g) } + +// LoadGraph loads the given schema package from the given path +// and construct a *gen.Graph. The path can be either a package +// path (e.g github.com/a8m/x) or a filepath. +// +// This function used to create a standalone plugin programs that +// want to interact with the ent schemas. An example for usage: +// +// package main +// +// import ( +// "log" +// +// "fbc/ent/entc/plugin" +// ) +// +// func main() { +// graph, err := plugin.LoadGraph("./ent/schema") +// if err != nil { +// log.Fatal(err) +// } +// for _, node := range graph.Nodes { +// log.Println(node.Name) +// } +// } +// +func LoadGraph(path string) (*gen.Graph, error) { + plg, err := (&build.Config{Path: path}).Build() + if err != nil { + return nil, err + } + defer os.Remove(plg.Path) + + schemas, err := plg.Load() + if err != nil { + return nil, err + } + + return gen.NewGraph(gen.Config{Schema: plg.PkgPath, Package: filepath.Dir(plg.PkgPath)}, schemas...) +} + +// MustLoadGraph is like LoadGraph but panics if LoadGraph returns an error. +// It simplifies safe initialization of global variables holding a *gen.Graph. +func MustLoadGraph(path string) *gen.Graph { + graph, err := LoadGraph(path) + if err != nil { + panic(err) + } + return graph +} + +// Exec loads and executes the provided plugin with +// the provided *gen/Graph. +// +// It returns an error if the plugin is invalid or +// it's not fulfilling the entc/plugin interface. 
+func Exec(path string, graph *gen.Graph) error { + plg, err := plugin.Open(path) + if err != nil { + return errors.WithMessagef(err, "open plugin %s", path) + } + sym, err := plg.Lookup(Symbol) + if err != nil { + return errors.WithMessagef(err, "find symbol (%q) in plugin", Symbol) + } + g, ok := sym.(Generator) + if !ok { + return errors.Errorf("exported symbol %q does not implement the entc/plugin.Generator", Symbol) + } + return g.Gen(graph) +} diff --git a/entc/plugin/plugin_test.go b/entc/plugin/plugin_test.go new file mode 100644 index 000000000..43444fdc1 --- /dev/null +++ b/entc/plugin/plugin_test.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestExecInvalid(t *testing.T) { + err := Exec("./testdata/notfound", nil) + require.Error(t, err, "plugin not found") + + dest := "invalid.so" + require.NoError(t, buildPlg("./testdata/invalid", dest)) + defer os.Remove(dest) + + err = Exec(dest, nil) + require.Error(t, err, "does not implement the entc/plugin interface") +} + +func TestExecValid(t *testing.T) { + err := Exec("./testdata/notfound", nil) + require.Error(t, err, "plugin not found") + + dest := "valid.so" + require.NoError(t, buildPlg("./testdata/valid", dest)) + defer os.Remove(dest) + + err = Exec(dest, nil) + require.NoError(t, err) +} + +func buildPlg(src, dest string) error { + out := bytes.NewBuffer(nil) + cmd := exec.Command("go", "build", "-o", dest, "-buildmode", "plugin", src) + cmd.Stderr = out + if err := cmd.Run(); err != nil { + return fmt.Errorf("entc/plugin: %s", out) + } + return nil +} diff --git a/entc/plugin/testdata/invalid/invalid.go b/entc/plugin/testdata/invalid/invalid.go new file mode 100644 index 000000000..ec9f9e5cb --- /dev/null +++ b/entc/plugin/testdata/invalid/invalid.go @@ -0,0 +1,4 @@ +package main + +// Gen does not implement the plugin.Generator interface. 
+var Gen = func() {} diff --git a/entc/plugin/testdata/valid/valid.go b/entc/plugin/testdata/valid/valid.go new file mode 100644 index 000000000..abd26f4de --- /dev/null +++ b/entc/plugin/testdata/valid/valid.go @@ -0,0 +1,17 @@ +package main + +import ( + "fbc/ent/entc/gen" +) + +// Generator implements the plugin.Generator interface. +type Generator struct{} + +// Gen implementation. +func (Generator) Gen(*gen.Graph) error { + // logic goes here. + return nil +} + +// Gen is the required plugin symbol. +var Gen = Generator{} diff --git a/field/field.go b/field/field.go new file mode 100644 index 000000000..47a3e18fa --- /dev/null +++ b/field/field.go @@ -0,0 +1,452 @@ +package field + +import ( + "errors" + "regexp" + "strconv" + "strings" +) + +// Type is a field type. +type Type uint + +// Field types. +const ( + TypeInvalid Type = iota + TypeBool + TypeTime + TypeString + TypeInt + TypeInt8 + TypeInt16 + TypeInt32 + TypeInt64 + TypeUint + TypeUint8 + TypeUint16 + TypeUint32 + TypeUint64 + TypeFloat32 + TypeFloat64 + endTypes +) + +func (t Type) String() string { + if int(t) < len(typeNames) { + return typeNames[t] + } + return "type" + strconv.Itoa(int(t)) +} + +// Valid reports if the given type if known type. +func (t Type) Valid() bool { return t > TypeInvalid && t < endTypes } + +// Numeric reports of the given type is a numeric type. +func (t Type) Numeric() bool { return t >= TypeInt && t < endTypes } + +// ConstName returns the constant name of a type. It's used by entc for printing the constant name in templates. 
+func (t Type) ConstName() string { + if t == TypeTime { + return "TypeTime" + } + return "Type" + strings.Title(t.String()) +} + +var typeNames = [...]string{ + TypeInvalid: "invalid", + TypeBool: "bool", + TypeTime: "time.Time", + TypeString: "string", + TypeInt: "int", + TypeInt8: "int8", + TypeInt16: "int16", + TypeInt32: "int32", + TypeInt64: "int64", + TypeUint: "uint", + TypeUint8: "uint8", + TypeUint16: "uint16", + TypeUint32: "uint32", + TypeUint64: "uint64", + TypeFloat32: "float32", + TypeFloat64: "float64", +} + +// Field represents a field on a graph vertex. +type Field struct { + typ Type + tag string + name string + comment string + unique bool + nullable bool + optional bool + value interface{} + matchers []*regexp.Regexp + validators []interface{} +} + +// Int returns a new Field with type int. +func Int(name string) *intBuilder { return &intBuilder{Field{typ: TypeInt, name: name}} } + +// Float returns a new Field with type float. +func Float(name string) *floatBuilder { return &floatBuilder{Field{typ: TypeFloat64, name: name}} } + +// String returns a new Field with type string. +func String(name string) *stringBuilder { return &stringBuilder{Field{typ: TypeString, name: name}} } + +// Bool returns a new Field with type bool. +func Bool(name string) *boolBuilder { return &boolBuilder{Field{typ: TypeBool, name: name}} } + +// Time returns a new Field with type timestamp. +func Time(name string) *timeBuilder { return &timeBuilder{Field{typ: TypeTime, name: name}} } + +// Type returns the field type. +func (f Field) Type() Type { return f.typ } + +// Name returns the field name. +func (f Field) Name() string { return f.name } + +// HasDefault returns is this field has a default value. +func (f Field) HasDefault() bool { return f.value != nil } + +// Value returns the default value of the field. +func (f Field) Value() interface{} { return f.value } + +// IsNullable returns if this field is an nullable field. Basically, wraps the value with pointer. 
+func (f Field) IsNullable() bool { return f.nullable } + +// IsOptional returns is this field is an optional field. +func (f Field) IsOptional() bool { return f.optional } + +// IsUnique returns is this field is a unique field. +func (f Field) IsUnique() bool { return f.unique } + +// Validators returns the field matchers. +func (f Field) Validators() []interface{} { return f.validators } + +// Tag returns the struct tag of the field. +func (f Field) Tag() string { return f.tag } + +// intBuilder is the builder for int field. +type intBuilder struct { + Field +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *intBuilder) Range(i, j int) *intBuilder { + b.validators = append(b.validators, func(v int) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *intBuilder) Min(i int) *intBuilder { + b.validators = append(b.validators, func(v int) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *intBuilder) Max(i int) *intBuilder { + b.validators = append(b.validators, func(v int) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *intBuilder) Positive() *intBuilder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *intBuilder) Negative() *intBuilder { + return b.Max(-1) +} + +// Default sets the default value of the field. 
+func (b *intBuilder) Default(i int) *intBuilder { + b.value = i + return b +} + +// Nullable indicates that this field is nullable. +// Unlike "Optional", nullable fields are pointers in the generated field. +func (b *intBuilder) Nullable() *intBuilder { + b.nullable = true + return b +} + +// Comment sets the comment of the field. +func (b *intBuilder) Comment(c string) *intBuilder { + b.comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *intBuilder) Optional() *intBuilder { + b.optional = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *intBuilder) StructTag(s string) *intBuilder { + b.tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *intBuilder) Validate(fn func(int) error) *intBuilder { + b.validators = append(b.validators, fn) + return b +} + +// floatBuilder is the builder for float fields. +type floatBuilder struct { + Field +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *floatBuilder) Range(i, j float64) *floatBuilder { + b.validators = append(b.validators, func(v float64) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *floatBuilder) Min(i float64) *floatBuilder { + b.validators = append(b.validators, func(v float64) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. 
+func (b *floatBuilder) Max(i float64) *floatBuilder { + b.validators = append(b.validators, func(v float64) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 0.000001. Operation fails if the validator fails. +func (b *floatBuilder) Positive() *floatBuilder { + return b.Min(1e-06) +} + +// Negative adds a maximum value validator with the value of -0.000001. Operation fails if the validator fails. +func (b *floatBuilder) Negative() *floatBuilder { + return b.Max(-1e-06) +} + +// Default sets the default value of the field. +func (b *floatBuilder) Default(i float64) *floatBuilder { + b.value = i + return b +} + +// Nullable indicates that this field is nullable. +// Unlike "Optional", nullable fields are pointers in the generated field. +func (b *floatBuilder) Nullable() *floatBuilder { + b.nullable = true + return b +} + +// Comment sets the comment of the field. +func (b *floatBuilder) Comment(c string) *floatBuilder { + b.comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *floatBuilder) Optional() *floatBuilder { + b.optional = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *floatBuilder) StructTag(s string) *floatBuilder { + b.tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *floatBuilder) Validate(fn func(float64) error) *floatBuilder { + b.validators = append(b.validators, fn) + return b +} + +// stringBuilder is the builder for string fields. +type stringBuilder struct { + Field +} + +// Unique makes the field unique within all vertices of this type. +func (b *stringBuilder) Unique() *stringBuilder { + b.unique = true + return b +} + +// Match adds a regex matcher for this field. Operation fails if the regex fails. 
+func (b *stringBuilder) Match(re *regexp.Regexp) *stringBuilder { + b.validators = append(b.validators, func(v string) error { + if !re.MatchString(v) { + return errors.New("value does not match validation") + } + return nil + }) + return b +} + +// MinLen adds a length validator for this field. +// Operation fails if the length of the string is less than the given value. +func (b *stringBuilder) MinLen(i int) *stringBuilder { + b.validators = append(b.validators, func(v string) error { + if len(v) < i { + return errors.New("value is less than the required length") + } + return nil + }) + return b +} + +// MaxLen adds a length validator for this field. +// Operation fails if the length of the string is greater than the given value. +func (b *stringBuilder) MaxLen(i int) *stringBuilder { + b.validators = append(b.validators, func(v string) error { + if len(v) > i { + return errors.New("value is less than the required length") + } + return nil + }) + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *stringBuilder) Validate(fn func(string) error) *stringBuilder { + b.validators = append(b.validators, fn) + return b +} + +// Default sets the default value of the field. +func (b *stringBuilder) Default(s string) *stringBuilder { + b.value = s + return b +} + +// Nullable indicates that this field is nullable. +// Unlike "Optional", nullable fields are pointers in the generated field. +func (b *stringBuilder) Nullable() *stringBuilder { + b.nullable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *stringBuilder) Optional() *stringBuilder { + b.optional = true + return b +} + +// Comment sets the comment of the field. +func (b *stringBuilder) Comment(c string) *stringBuilder { + b.comment = c + return b +} + +// StructTag sets the struct tag of the field. 
+func (b *stringBuilder) StructTag(s string) *stringBuilder { + b.tag = s + return b +} + +// timeBuilder is the builder for time fields. +type timeBuilder struct { + Field +} + +// Nullable indicates that this field is nullable. +// Unlike "Optional", nullable fields are pointers in the generated field. +func (b *timeBuilder) Nullable() *timeBuilder { + b.nullable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *timeBuilder) Optional() *timeBuilder { + b.optional = true + return b +} + +// Comment sets the comment of the field. +func (b *timeBuilder) Comment(c string) *timeBuilder { + b.comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *timeBuilder) StructTag(s string) *timeBuilder { + b.tag = s + return b +} + +// boolBuilder is the builder for boolean fields. +type boolBuilder struct { + Field +} + +// Default sets the default value of the field. +func (b *boolBuilder) Default(v bool) *boolBuilder { + b.value = v + return b +} + +// Nullable indicates that this field is nullable. +// Unlike "Optional", nullable fields are pointers in the generated field. +func (b *boolBuilder) Nullable() *boolBuilder { + b.nullable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *boolBuilder) Optional() *boolBuilder { + b.optional = true + return b +} + +// Comment sets the comment of the field. +func (b *boolBuilder) Comment(c string) *boolBuilder { + b.comment = c + return b +} + +// StructTag sets the struct tag of the field. 
+func (b *boolBuilder) StructTag(s string) *boolBuilder { + b.tag = s + return b +} diff --git a/field/field_test.go b/field/field_test.go new file mode 100644 index 000000000..0f5f44b1f --- /dev/null +++ b/field/field_test.go @@ -0,0 +1,66 @@ +package field_test + +import ( + "fbc/ent/field" + "github.com/stretchr/testify/require" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestInt(t *testing.T) { + f := field.Int("age").Positive() + assert.Equal(t, "age", f.Name()) + assert.Equal(t, field.TypeInt, f.Type()) + assert.Len(t, f.Validators(), 1) + + f = field.Int("age").Default(10).Min(10).Max(20) + assert.True(t, f.HasDefault()) + assert.Equal(t, 10, f.Value()) + assert.Len(t, f.Validators(), 2) + + f = field.Int("age").Range(20, 40).Nullable() + assert.False(t, f.HasDefault()) + assert.True(t, f.IsNullable()) + assert.Len(t, f.Validators(), 1) +} + +func TestFloat(t *testing.T) { + f := field.Float("age").Positive() + assert.Equal(t, "age", f.Name()) + assert.Equal(t, field.TypeFloat64, f.Type()) + assert.Len(t, f.Validators(), 1) + + f = field.Float("age").Min(2.5).Max(5) + assert.Len(t, f.Validators(), 2) +} + +func TestBool(t *testing.T) { + f := field.Bool("active").Default(true) + assert.Equal(t, "active", f.Name()) + assert.Equal(t, field.TypeBool, f.Type()) + assert.True(t, f.HasDefault()) + assert.Equal(t, true, f.Value()) +} + +func TestString(t *testing.T) { + re := regexp.MustCompile("[a-zA-Z0-9]") + f := field.String("name").Unique().Match(re).Validate(func(string) error { return nil }) + assert.Equal(t, field.TypeString, f.Type()) + assert.Equal(t, "name", f.Name()) + assert.True(t, f.IsUnique()) + assert.Len(t, f.Validators(), 2) +} + +func TestTime(t *testing.T) { + f := field.Time("created_at") + assert.Equal(t, "created_at", f.Name()) + assert.Equal(t, field.TypeTime, f.Type()) + assert.Equal(t, "time.Time", f.Type().String()) +} + +func TestField_Tag(t *testing.T) { + f := 
field.Bool("expired").StructTag(`json:"expired,omitempty"`) + require.Equal(t, `json:"expired,omitempty"`, f.Tag()) +} diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..7e5db6047 --- /dev/null +++ b/go.mod @@ -0,0 +1,22 @@ +module fbc/ent + +replace fbc/lib/go/gremlin => ../lib/go/gremlin + +require ( + fbc/lib/go/gremlin v0.0.0 + github.com/didi/gendry v1.1.1 + github.com/go-openapi/inflect v0.18.0 + github.com/go-sql-driver/mysql v1.4.1-0.20190510102335-877a9775f068 + github.com/google/uuid v1.1.0 + github.com/k0kubun/pp v3.0.1+incompatible + github.com/mattn/go-colorable v0.1.1 // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mattn/go-sqlite3 v1.10.0 + github.com/olekukonko/tablewriter v0.0.1 + github.com/pkg/errors v0.8.1 + github.com/russross/meddler v0.0.0-20181122144826-87a225081a7c + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.3 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/tools v0.0.0-20190514171724-faff00d7e7f6 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..ba8213ab1 --- /dev/null +++ b/go.sum @@ -0,0 +1,68 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/didi/gendry v1.1.1 h1:5hLPMw9QYravbo8prFNJxK3wVdldDRoqp1tI7v+L7+U= +github.com/didi/gendry v1.1.1/go.mod h1:Csuh34TDGcEbZ0NsewpE6oHGycm2WGyqSovOM0+GBJk= +github.com/go-openapi/inflect v0.18.0 h1:4TMtuIyNxWl29TYpb1grUCuNy+koT0oN5ZXHb6wrZ3E= +github.com/go-openapi/inflect v0.18.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-sql-driver/mysql v1.4.1-0.20190510102335-877a9775f068 h1:q2kwd9Bcgl2QpSi/Wjcx9jzwyICt3EWTP5to43QhwaA= +github.com/go-sql-driver/mysql 
v1.4.1-0.20190510102335-877a9775f068/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= 
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/meddler v0.0.0-20181122144826-87a225081a7c h1:+1SzuPPVOpWjb0RvarVKVriWIZ3nqhe8S4aaRAVs/S8= +github.com/russross/meddler v0.0.0-20181122144826-87a225081a7c/go.mod h1:L0qig4K5sCW6YvsjqjPgkKJpwphlhMX1SmjGdcKXbsw= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 
h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190327011446-79af862e6737 h1:aEQSysewJq5X0N9MzIyq6qGRAjsIh/V6MJtYQ1xydJ4= +golang.org/x/tools v0.0.0-20190327011446-79af862e6737/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190514171724-faff00d7e7f6 h1:MfRv9P1q9msQtpNPKv0olEj2ueVViE36KqlG4Br8bAs= +golang.org/x/tools v0.0.0-20190514171724-faff00d7e7f6/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=